code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : str = PriorTransformer
__a : Tuple = '''hidden_states'''
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = 4
__lowercase = 8
__lowercase = 7
__lowercase = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__=0 ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(lowerCAmelCase__ )
__lowercase = 4
__lowercase = 8
__lowercase = 7
__lowercase = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return (4, 8)
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return (4, 8)
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCAmelCase__ )
__lowercase = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.model_class(**lowerCAmelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
__lowercase = model.to(lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
__lowercase = self.get_dummy_seed_input()
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )[0]
__lowercase = output[0, :5].flatten().cpu()
print(lowerCAmelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowercase = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-2 ) )
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__=1 , lowerCAmelCase__=7_68 , lowerCAmelCase__=77 , lowerCAmelCase__=0 ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(lowerCAmelCase__ )
__lowercase = batch_size
__lowercase = embedding_dim
__lowercase = num_embeddings
__lowercase = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
__lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(lowerCAmelCase__ )
__lowercase = self.get_dummy_seed_input(seed=lowerCAmelCase__ )
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )[0]
assert list(sample.shape ) == [1, 7_68]
__lowercase = sample[0, :8].flatten().cpu()
print(lowerCAmelCase__ )
__lowercase = torch.tensor(lowerCAmelCase__ )
assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) | 210 | from decimal import Decimal, getcontext
from math import ceil, factorial
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
__lowercase = precision
__lowercase = ceil(precision / 14 )
__lowercase = 426880 * Decimal(10005 ).sqrt()
__lowercase = 1
__lowercase = 13591409
__lowercase = Decimal(lowercase )
for k in range(1 , lowercase ):
__lowercase = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__a : Optional[Any] = 5_0
print(F'''The first {n} digits of pi is: {pi(n)}''') | 210 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=7 , lowercase_ : Tuple=3 , lowercase_ : Tuple=30 , lowercase_ : Union[str, Any]=400 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=None , lowercase_ : Optional[int]=True , lowercase_ : Tuple=[0.5, 0.5, 0.5] , lowercase_ : Any=[0.5, 0.5, 0.5] , lowercase_ : Any=True , lowercase_ : Union[str, Any]=1 / 255 , lowercase_ : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase_ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowercase_ : Dict = parent
lowercase_ : Optional[Any] = batch_size
lowercase_ : Optional[int] = num_channels
lowercase_ : Dict = min_resolution
lowercase_ : int = max_resolution
lowercase_ : Optional[int] = do_resize
lowercase_ : Optional[int] = size
lowercase_ : Optional[int] = do_normalize
lowercase_ : Union[str, Any] = image_mean
lowercase_ : List[Any] = image_std
lowercase_ : str = do_rescale
lowercase_ : List[Any] = rescale_factor
lowercase_ : Optional[Any] = do_pad
def SCREAMING_SNAKE_CASE_ ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Dict=False ):
if not batched:
lowercase_ : Optional[int] = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowercase_ , lowercase_ : Optional[int] = image.size
else:
lowercase_ , lowercase_ : List[Any] = image.shape[1], image.shape[2]
if w < h:
lowercase_ : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowercase_ : Any = self.size["""shortest_edge"""]
elif w > h:
lowercase_ : Optional[Any] = self.size["""shortest_edge"""]
lowercase_ : Tuple = int(self.size["""shortest_edge"""] * w / h )
else:
lowercase_ : Union[str, Any] = self.size["""shortest_edge"""]
lowercase_ : str = self.size["""shortest_edge"""]
else:
lowercase_ : int = []
for image in image_inputs:
lowercase_ , lowercase_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase_ : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowercase_ : int = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowercase_ : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
# Initialize image_processing
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowercase_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowercase_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase_ , lowercase_ : Any = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Dict = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowercase_ , lowercase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
# prepare image and target
lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowercase_ : Tuple = json.loads(f.read() )
lowercase_ : Union[str, Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowercase_ : Tuple = DeformableDetrImageProcessor()
lowercase_ : Optional[Any] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowercase_ : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowercase_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
# verify area
lowercase_ : Optional[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowercase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowercase_ : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) )
# verify image_id
lowercase_ : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowercase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowercase_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowercase_ : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
# prepare image, target and masks_path
lowercase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowercase_ : Union[str, Any] = json.loads(f.read() )
lowercase_ : str = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowercase_ : Union[str, Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowercase_ : Optional[Any] = DeformableDetrImageProcessor(format="""coco_panoptic""" )
lowercase_ : int = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowercase_ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowercase_ : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
# verify area
lowercase_ : List[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowercase_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) )
# verify image_id
lowercase_ : Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowercase_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowercase_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowercase_ : List[str] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowercase_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowercase_ : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 21 | '''simple docstring'''
from __future__ import annotations
from typing import Any
def lowerCamelCase ( UpperCAmelCase__ : list ) -> int:
if not postfix_notation:
return 0
lowercase_ : Any = {"""+""", """-""", """*""", """/"""}
lowercase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
lowercase_ , lowercase_ : Dict = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class __UpperCamelCase ( a__ ):
lowerCamelCase : str
lowerCamelCase : int
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->list[str]:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(_lowercase ) )]
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->BWTTransformDict:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
a : Optional[int] = all_rotations(_lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
a : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_lowercase ),
}
return response
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
a : Tuple = int(_lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(_lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
a : Any = [""] * len(_lowercase )
for _ in range(len(_lowercase ) ):
for i in range(len(_lowercase ) ):
a : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a : Dict = '''Provide a string that I will generate its BWT transform: '''
a : Any = input(entry_msg).strip()
a : str = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result["bwt_string"]}\''''
)
a : int = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
F'''we get original string \'{original_string}\''''
)
| 105 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if len(UpperCAmelCase ) <= 1 or n <= 1:
return
insert_next(UpperCAmelCase , n - 1 )
rec_insertion_sort(UpperCAmelCase , n - 1 )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if index >= len(UpperCAmelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A , A = (
collection[index],
collection[index - 1],
)
insert_next(UpperCAmelCase , index + 1 )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input('Enter integers separated by spaces: ')
_lowerCamelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 258 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : Any , lowercase : Dict , lowercase : List[str]=100 , lowercase : List[Any]=13 , lowercase : Optional[Any]=30 , lowercase : Any=2 , lowercase : List[str]=3 , lowercase : List[Any]=True , lowercase : str=True , lowercase : List[Any]=32 , lowercase : List[str]=5 , lowercase : str=4 , lowercase : Optional[int]=37 , lowercase : Optional[int]="gelu" , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[str]=10 , lowercase : Tuple=0.02 , lowercase : List[Any]=3 , ):
"""simple docstring"""
lowercase_ :Tuple = parent
lowercase_ :Union[str, Any] = vocab_size
lowercase_ :Dict = batch_size
lowercase_ :str = image_size
lowercase_ :int = patch_size
lowercase_ :Dict = num_channels
lowercase_ :Tuple = is_training
lowercase_ :int = use_labels
lowercase_ :str = hidden_size
lowercase_ :Union[str, Any] = num_hidden_layers
lowercase_ :Tuple = num_attention_heads
lowercase_ :str = intermediate_size
lowercase_ :int = hidden_act
lowercase_ :int = hidden_dropout_prob
lowercase_ :Union[str, Any] = attention_probs_dropout_prob
lowercase_ :Union[str, Any] = type_sequence_label_size
lowercase_ :Any = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ :Tuple = (image_size // patch_size) ** 2
lowercase_ :Union[str, Any] = num_patches + 1
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ :Any = None
if self.use_labels:
lowercase_ :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :List[Any] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] , lowercase : str , lowercase : Union[str, Any] , lowercase : Any ):
"""simple docstring"""
lowercase_ :str = FlaxBeitModel(config=_a )
lowercase_ :List[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :int = FlaxBeitForMaskedImageModeling(config=_a )
lowercase_ :Any = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase__ ( self : Union[str, Any] , lowercase : str , lowercase : str , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :List[str] = self.type_sequence_label_size
lowercase_ :Optional[Any] = FlaxBeitForImageClassification(config=_a )
lowercase_ :Tuple = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ :Dict = 1
lowercase_ :Any = FlaxBeitForImageClassification(_a )
lowercase_ :Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ :Optional[Any] = model(_a )
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :str = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) :Optional[int] = config_and_inputs
lowercase_ :Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a_ ( _lowerCAmelCase , unittest.TestCase ):
__A = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Tuple = FlaxBeitModelTester(self )
lowercase_ :Any = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ , lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Tuple = model_class(_a )
lowercase_ :str = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ :Optional[int] = [*signature.parameters.keys()]
lowercase_ :Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ , lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ :Optional[int] = self._prepare_for_class(_a , _a )
lowercase_ :Optional[int] = model_class(_a )
@jax.jit
def model_jitted(lowercase : Any , **lowercase : Any ):
return model(pixel_values=_a , **_a )
with self.subTest("JIT Enabled" ):
lowercase_ :str = model_jitted(**_a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase_ :Union[str, Any] = model_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ :int = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase_ :int = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ( ):
lowercase_ :int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class a_ ( unittest.TestCase ):
    """Slow integration tests for Flax BEiT checkpoints: masked-image modeling and image classification."""

    # NOTE(review): throughout this class, results are bound to `lowercase_` but later
    # read through names like `model`, `image_processor`, `pixel_values`, `outputs`,
    # `logits`, `bool_masked_pos` — confirm the intended local names before running.
    @cached_property
    def lowercase__ ( self : Optional[int] ):
        """Return a BEiT image processor when vision deps are installed, else None."""
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def lowercase__ ( self : Dict ):
        """Check masked-image-modeling logits of beit-base-patch16-224-pt22k against stored reference values."""
        lowercase_ :Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
        lowercase_ :Any = self.default_image_processor
        lowercase_ :Union[str, Any] = prepare_img()
        lowercase_ :List[str] = image_processor(images=_a , return_tensors="np" ).pixel_values
        # prepare bool_masked_pos
        lowercase_ :List[str] = np.ones((1, 196) , dtype=_a )
        # forward pass
        lowercase_ :List[str] = model(pixel_values=_a , bool_masked_pos=_a )
        lowercase_ :List[Any] = outputs.logits
        # verify the logits
        lowercase_ :Union[str, Any] = (1, 196, 8_192)
        self.assertEqual(logits.shape , _a )
        lowercase_ :int = np.array(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1e-2 ) )

    @slow
    def lowercase__ ( self : Optional[int] ):
        """Check 1000-class ImageNet classification logits and argmax (tabby cat = 281)."""
        lowercase_ :int = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
        lowercase_ :int = self.default_image_processor
        lowercase_ :Optional[int] = prepare_img()
        lowercase_ :Tuple = image_processor(images=_a , return_tensors="np" )
        # forward pass
        lowercase_ :Optional[Any] = model(**_a )
        lowercase_ :str = outputs.logits
        # verify the logits
        lowercase_ :Optional[int] = (1, 1_000)
        self.assertEqual(logits.shape , _a )
        lowercase_ :Tuple = np.array([-1.23_85, -1.09_87, -1.01_08] )
        self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1e-4 ) )
        lowercase_ :Tuple = 281
        self.assertEqual(logits.argmax(-1 ).item() , _a )

    @slow
    def lowercase__ ( self : Union[str, Any] ):
        """Check 21841-class ImageNet-22k classification logits and argmax for the large checkpoint."""
        lowercase_ :Optional[Any] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
        lowercase_ :List[Any] = self.default_image_processor
        lowercase_ :int = prepare_img()
        lowercase_ :Any = image_processor(images=_a , return_tensors="np" )
        # forward pass
        lowercase_ :Optional[Any] = model(**_a )
        lowercase_ :Dict = outputs.logits
        # verify the logits
        lowercase_ :Dict = (1, 21_841)
        self.assertEqual(logits.shape , _a )
        lowercase_ :int = np.array([1.68_81, -0.27_87, 0.59_01] )
        self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1e-4 ) )
        lowercase_ :Optional[Any] = 2_396
        self.assertEqual(logits.argmax(-1 ).item() , _a )
| 363 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase : Any =logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ):
    """Coerce the input (single image, list of frames, or list of videos) into a
    batched list-of-videos, where each video is a list of frames.

    Raises ValueError when the input is not a valid image structure.
    """
    # Bug fix: the original tested `__lowerCamelCase` but indexed an undefined
    # name `videos`; bind the parameter to one consistent local first.
    videos = __lowerCamelCase
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> wrap into a batch of one.
        return [videos]
    elif is_valid_image(videos):
        # A single frame -> one-video, one-frame batch.
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}' )
class a_ ( _lowerCAmelCase ):
    r"""
    Video image processor: resizes, center-crops, rescales (with an optional offset
    toward a zero-centered range) and normalizes every frame, returning batched
    ``pixel_values`` per video.

    Fixes relative to the previous revision: every method was defined under the same
    name (so only the last survived) and every parameter was the duplicate name
    ``lowercase`` (a SyntaxError); the real names are restored from the keyword call
    sites in ``preprocess``. The ``do_resize`` validation also had an operator
    precedence bug (`a and b or c`).
    """

    __A = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        """Store processing defaults; ``size`` defaults to shortest_edge=256 and ``crop_size`` to 224x224."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` — either {"shortest_edge": n} or explicit {"height", "width"}."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        # `resize` here resolves to the module-level image_transforms function, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop ``image`` to {"height", "width"}."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``, optionally subtracting ``scale / 2`` first.

        NOTE(review): the pre-rescale ``image - (scale / 2)`` offset is preserved
        verbatim from the previous revision — confirm it matches the intended
        zero-centering (upstream processors typically offset after rescaling).
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        # Module-level `rescale` from image_transforms.
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ):
        """Apply the configured transform pipeline to a single frame and return a numpy array."""
        # Precedence fix: validate size/resample only when resizing is requested.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos and return a BatchFeature with ``pixel_values``.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # NOTE(review): the module-level batching helper is currently defined under the
        # name `UpperCAmelCase_`; `make_batched` is the intended name — confirm/rename
        # at module level so this call resolves.
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample,
                    do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale,
                    rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize,
                    image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 147 | 0 |
from __future__ import annotations
from fractions import Fraction
def a ( num : int , den : int ) -> bool:
    """Return True when ``num/den`` is a digit-cancelling fraction.

    That is, cancelling the shared digit (num's units digit == den's tens digit)
    leaves the value unchanged: (num // 10) / (den % 10) == num / den.

    Note: relies on short-circuiting — callers must ensure ``den % 10 != 0``
    reaches the division only when the earlier conditions hold.
    """
    # Bug fix: both parameters were previously named `A__` (a duplicate-argument
    # SyntaxError) while the body already used `num` and `den`.
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def a ( A__ : int ) -> list[str]:
    """Return every non-trivial two-digit digit-cancelling fraction "num/den"
    with num < 10**A__ (Project Euler 33 search).

    Bug fixes: the loop previously ran over the empty ``range(A__, A__)`` and
    every local was rebound to the same scrambled name; the digit-cancelling
    test is inlined so this function is self-contained.
    """
    solutions = []
    den = 11
    last_digit = int('1' + '0' * A__ )
    for num in range(den, last_digit):
        while den <= 99:
            # candidate: shared digit (num units == den tens) and no trailing zero
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                # digit-cancelling iff the naive cancellation preserves the value
                if (num // 10) / (den % 10) == num / den:
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        den = 10  # restart denominators for the next numerator
    return solutions
def a ( A__ : int = 2 ) -> int:
    """Multiply den/num over every digit-cancelling fraction and return the
    integer product — the denominator of the overall product in lowest terms
    (Project Euler 33 answer: 100 for the default two-digit case).

    Bug fixes: parse each yielded fraction (the original parsed the argument),
    and accumulate into a defined `result`.
    """
    result = 1.0
    # NOTE(review): `fraction_list` must exist at module scope; the helper above is
    # currently also named `a` — confirm/restore its intended name.
    for fraction in fraction_list(A__ ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module; the entry point above
    # is currently named `a` — confirm the intended name.
    print(solution())
| 205 |
import os
def a ( ) -> int:
    """Project Euler 22: sum of name scores from ``p022_names.txt``.

    Each name's score is its alphabetical value (A=1, ..., Z=26 via ord - 64)
    multiplied by its 1-based position in the sorted list.

    Bug fixes: the path helper received an undefined argument (now anchored at
    this file's directory) and every local was rebound to one scrambled name.
    """
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    # File holds one line of comma-separated, double-quoted names.
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    total_score = 0
    for i, name in enumerate(names ):
        name_score = 0
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
    return total_score
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module; the function above
    # is currently named `a` — confirm the intended name.
    print(solution())
| 205 | 1 |
from __future__ import annotations
def _lowerCAmelCase ( A__: int = 4 ):
'''simple docstring'''
UpperCAmelCase = abs(A__ ) or 4
return [[1 + x + y * row_size for x in range(A__ )] for y in range(A__ )]
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
return reverse_row(transpose(A__ ) )
# OR.. transpose(reverse_column(matrix))
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
return reverse_row(reverse_column(A__ ) )
# OR.. reverse_column(reverse_row(matrix))
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
return reverse_column(transpose(A__ ) )
# OR.. transpose(reverse_row(matrix))
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
UpperCAmelCase = [list(A__ ) for x in zip(*A__ )]
return matrix
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
UpperCAmelCase = matrix[::-1]
return matrix
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
UpperCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCAmelCase ( A__: list[list[int]] ):
'''simple docstring'''
for i in matrix:
print(*A__ )
if __name__ == "__main__":
    # NOTE(review): this demo driver calls `make_matrix`, `print_matrix`,
    # `rotate_aa`/`rotate_aaa` and reads `matrix`, but every helper above is
    # defined under the single name `_lowerCAmelCase` and the results below are
    # bound to `__magic_name__` — none of these names resolve as written.
    # Confirm and restore the intended module-level names.
    __magic_name__ = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    __magic_name__ = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    __magic_name__ = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 152 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): all four module constants below are rebound to the same name
# `__magic_name__`, so only the last assignment survives. Upstream names are
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (the tokenizer class below reads
# those names) — confirm and restore.
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
__magic_name__ = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}
__magic_name__ = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def _lowerCAmelCase ( A__: List[Any] , A__: int ):
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(A__ ):
UpperCAmelCase = b
UpperCAmelCase = idx
for wd in b:
UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase ( A__ ):
    """GPT-NeoX Japanese tokenizer: loads a comma-separated vocab file plus an emoji
    JSON table and delegates segmentation to a SubWordJapaneseTokenizer."""

    # NOTE(review): all four class attributes are bound to the same name, so only
    # the last assignment survives; upstream these are vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes and model_input_names.
    # The referenced module constants are also currently all named __magic_name__.
    __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]

    # NOTE(review): every parameter is named `_snake_case` — a duplicate-argument
    # SyntaxError. Upstream order is (vocab_file, emoji_file, unk_token, pad_token,
    # bos_token, eos_token, do_clean_text, **kwargs); the body also reads names
    # (`vocab_file`, `emoji_file`, `do_clean_text`, `load_vocab_and_emoji`,
    # `SubWordJapaneseTokenizer`) that are not in scope as written — confirm.
    def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ) -> Tuple:
        """Validate the vocab/emoji paths, load them and build the subword tokenizer."""
        super().__init__(
            unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
        if not os.path.isfile(_snake_case ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        if not os.path.isfile(_snake_case ):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        UpperCAmelCase = do_clean_text
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
        UpperCAmelCase = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )

    @property
    def snake_case_ ( self ) -> Any:
        """Vocabulary size, counted over the raw (comma-joined) vocab entries."""
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )

    def snake_case_ ( self ) -> Union[str, Any]:
        """Return the full vocabulary (raw vocab plus added tokens) as a dict."""
        return dict(self.raw_vocab , **self.added_tokens_encoder )

    def snake_case_ ( self , _snake_case ) -> List[Any]:
        """Segment text into sub-word tokens, optionally cleaning it first."""
        return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )

    def snake_case_ ( self , _snake_case ) -> Dict:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )

    def snake_case_ ( self , _snake_case ) -> Optional[int]:
        """Map an id back to its token string via the subword tokenizer."""
        return self.subword_tokenizer.convert_id_to_token(_snake_case )

    def snake_case_ ( self , _snake_case ) -> List[str]:
        """Join a sequence of tokens into a single stripped string."""
        UpperCAmelCase = ''''''.join(_snake_case ).strip()
        return out_string

    def snake_case_ ( self , _snake_case ) -> List[int]:
        """Encode a Conversation's turns into one id sequence, eos-terminated per
        turn and truncated on the left to model_max_length."""
        UpperCAmelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
        if len(_snake_case ) > self.model_max_length:
            UpperCAmelCase = input_ids[-self.model_max_length :]
        return input_ids

    def snake_case_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
        """Write the vocab and emoji files into a directory (or prefixed path) and
        return both paths; warns when vocabulary indices are not consecutive."""
        UpperCAmelCase = 0
        if os.path.isdir(_snake_case ):
            UpperCAmelCase = os.path.join(
                _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            UpperCAmelCase = os.path.join(
                _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            UpperCAmelCase = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            UpperCAmelCase = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    UpperCAmelCase = token_index
                writer.write(''','''.join(_snake_case ) + '''\n''' )
                index += 1
        with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
            json.dump(self.emoji , _snake_case )
        return vocab_file, emoji_file
class lowercase ( A__ ):
    """Sub-word tokenizer for Japanese text: greedy longest-match against the vocab,
    with pre-substitution of URLs/emails/phone numbers/dates/prices, emoji mapping,
    and byte-level fallback for out-of-vocabulary characters."""

    # NOTE(review): parameters are all named `_snake_case` (duplicate-argument
    # SyntaxError); upstream signature is (self, vocab, ids_to_tokens, emoji),
    # which the body reads directly — confirm and restore.
    def __init__( self , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
        """Store the vocab tables and pre-compile the text-normalization regexes."""
        UpperCAmelCase = vocab # same as swe
        UpperCAmelCase = ids_to_tokens # same as bpe
        UpperCAmelCase = emoji
        # Longest vocab entry bounds the greedy match window in tokenize().
        UpperCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
        # NOTE(review): all six patterns below are bound to the same attribute name;
        # upstream they are six distinct matchers (URL, email, phone, date x2, price)
        # applied in order by clean_text() — confirm.
        UpperCAmelCase = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        UpperCAmelCase = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        UpperCAmelCase = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        UpperCAmelCase = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        UpperCAmelCase = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        UpperCAmelCase = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        # Box-drawing and block-element characters are collapsed to <BLOCK>.
        UpperCAmelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        UpperCAmelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        UpperCAmelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )

    def __len__( self ) -> Dict:
        """Number of entries in the id -> tokens table."""
        return len(self.ids_to_tokens )

    def snake_case_ ( self , _snake_case ) -> str:
        """Replace URLs/emails/phones/dates/prices with placeholder tags and collapse
        runs of box-drawing characters into a single <BLOCK>."""
        UpperCAmelCase = self.content_repattera.sub('''<URL>''' , _snake_case )
        UpperCAmelCase = self.content_repattera.sub('''<EMAIL>''' , _snake_case )
        UpperCAmelCase = self.content_repattera.sub('''<TEL>''' , _snake_case )
        UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
        UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
        UpperCAmelCase = self.content_repattera.sub('''<PRICE>''' , _snake_case )
        UpperCAmelCase = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            UpperCAmelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
        return content

    def snake_case_ ( self , _snake_case , _snake_case=False ) -> str:
        """Tokenize text: normalize whitespace/newlines/dashes, map emoji, then run a
        greedy longest-match over the vocab with byte-level fallback."""
        UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
        UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
        UpperCAmelCase = text.replace('''\r\n''' , '''<BR>''' )
        UpperCAmelCase = text.replace('''\n''' , '''<BR>''' )
        UpperCAmelCase = text.replace('''\r''' , '''<BR>''' )
        UpperCAmelCase = text.replace('''\t''' , '''<TAB>''' )
        UpperCAmelCase = text.replace('''—''' , '''ー''' )
        UpperCAmelCase = text.replace('''−''' , '''ー''' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                UpperCAmelCase = text.replace(_snake_case , _snake_case )
        if clean:
            UpperCAmelCase = self.clean_text(_snake_case )

        # True for 2-byte UTF-8 sequences in the symbol ranges mapped to <KIGOU>.
        def check_simbol(_snake_case ):
            UpperCAmelCase = x.encode()
            if len(_snake_case ) == 1 and len(_snake_case ) == 2:
                UpperCAmelCase = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False

        # True for 3-byte UTF-8 sequences in U+2000..U+2BFF mapped to <U2000U2BFF>.
        def checkuae(_snake_case ):
            UpperCAmelCase = x.encode()
            if len(_snake_case ) == 1 and len(_snake_case ) == 3:
                UpperCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE28080 and c <= 0XE2B07F:
                    return True
            return False

        UpperCAmelCase = 0
        UpperCAmelCase = []
        while pos < len(_snake_case ):
            # Tags starting with '<' may match up to maxlen characters; otherwise
            # the window is at most 3 characters.
            UpperCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            UpperCAmelCase = [] # (token_id, token, pos)
            for e in range(_snake_case , _snake_case , -1 ):
                UpperCAmelCase = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(_snake_case ) > 2:
                        UpperCAmelCase = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(_snake_case ) > 0:
                # the smallest token_id is adopted
                UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
                result.append(_snake_case )
                UpperCAmelCase = e
            else:
                UpperCAmelCase = pos + 1
                UpperCAmelCase = text[pos:end]
                if check_simbol(_snake_case ):
                    result.append('''<KIGOU>''' )
                elif checkuae(_snake_case ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    # Out-of-vocabulary: fall back to one token per UTF-8 byte.
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                UpperCAmelCase = end
        return result

    def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
        """Convert id(s) back to text, re-assembling byte tokens, emoji tags and the
        special whitespace/newline placeholders."""
        UpperCAmelCase = []
        UpperCAmelCase = []
        UpperCAmelCase = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(_snake_case ) > 0:
                words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
                UpperCAmelCase = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['''emoji_inv'''][word] )
            elif word == "<SP>":
                words.append(''' ''' )
            elif word == "<BR>":
                words.append(_snake_case )
            elif word == "<TAB>":
                words.append('''\t''' )
            elif word == "<BLOCK>":
                words.append('''▀''' )
            elif word == "<KIGOU>":
                words.append('''ǀ''' )
            elif word == "<U2000U2BFF>":
                words.append('''‖''' )
            else:
                words.append(_snake_case )
        if len(_snake_case ) > 0:
            words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
        UpperCAmelCase = ''''''.join(_snake_case )
        return text
| 152 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: submodule name -> public names it exports.
# Bug fixes: the original bound every structure update to the throwaway name
# `__A` (each assignment clobbering the last) and then passed an undefined
# `_import_structure` to the lazy loader; the dict is now built and extended
# under the name the loader actually consumes, and the lazy module is installed
# in sys.modules so attribute access triggers the deferred imports.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

# Optional: fast tokenizer (requires the `tokenizers` package).
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

# Optional: vision components.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

# Optional: PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): these submodule/class names ("layoutlmva") do not match the
    # structure keys above ("layoutlmv2") — confirm the actual file names.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so submodules are only imported on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class A ( _UpperCAmelCase , unittest.TestCase ):
    """Tokenizer tests for Reformer (slow + fast), built on the shared
    SentencePiece fixture model plus slow checks against the pretrained
    crime-and-punishment checkpoint."""

    # NOTE(review): the five class attributes are all bound to `lowerCamelCase`,
    # so only the last survives; upstream these are tokenizer_class,
    # rust_tokenizer_class, test_rust_tokenizer, test_seqaseq(False) and
    # test_sentencepiece(True) — confirm. The same scrambled-binding pattern
    # (`A__ = ...` then reads of names like `tokenizer`, `vocab_keys`,
    # `tokenizer_r`) recurs throughout the methods below.
    lowerCamelCase = ReformerTokenizer
    lowerCamelCase = ReformerTokenizerFast
    lowerCamelCase = True
    lowerCamelCase = False
    lowerCamelCase = True

    def snake_case__ ( self : Any )-> str:
        """Build a fixture tokenizer (accents kept) and save it into the temp dir."""
        super().setUp()
        A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case__ ( self : Optional[int] )-> Optional[int]:
        """Round-trip token <-> id conversion for the '<s>' token (id 1)."""
        A__ = '<s>'
        A__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )

    def snake_case__ ( self : str )-> Tuple:
        """Spot-check the first/last vocab entries and the fixture vocab size (1000)."""
        A__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0],'<unk>' )
        self.assertEqual(vocab_keys[1],'<s>' )
        self.assertEqual(vocab_keys[-1],'j' )
        self.assertEqual(len(lowercase_ ),1_0_0_0 )

    def snake_case__ ( self : Dict )-> Dict:
        """vocab_size property matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 )

    def snake_case__ ( self : Dict )-> List[str]:
        """Slow and fast tokenizers agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        A__ = self.get_tokenizer()
        A__ = self.get_rust_tokenizer()
        A__ = 'I was born in 92000, and this is falsé.'
        A__ = tokenizer.tokenize(lowercase_ )
        A__ = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_,lowercase_ )
        A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
        A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_,lowercase_ )
        A__ = self.get_rust_tokenizer()
        A__ = tokenizer.encode(lowercase_ )
        A__ = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_,lowercase_ )

    def snake_case__ ( self : int,lowercase_ : Optional[int]=1_5 )-> Optional[Any]:
        """Encoding with padding='max_length' must raise when the fast tokenizer
        has no pad token configured (simple and pair inputs)."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
                # Simple input
                A__ = 'This is a simple input'
                A__ = ['This is a simple input 1', 'This is a simple input 2']
                A__ = ('This is a simple input', 'This is a pair')
                A__ = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
                # Simple input
                self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
                # Simple input
                self.assertRaises(
                    lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
                # Pair input
                self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
                # Pair input
                self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
                # Pair input
                self.assertRaises(
                    lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)

    def snake_case__ ( self : List[Any] )-> Tuple:
        """Intentionally skipped in this suite."""
        pass

    def snake_case__ ( self : Dict )-> str:
        """Full tokenizer check against the fixture model: tokenize, ids, and
        ids -> tokens (unknown pieces map to '<unk>')."""
        A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ )
        A__ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ),[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2],)
        A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            lowercase_,[
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],)
        A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4],)
        A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
        self.assertListEqual(
            lowercase_,[
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],)

    @cached_property
    def snake_case__ ( self : Optional[int] )-> Any:
        """Pretrained crime-and-punishment tokenizer used by the slow tests."""
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )

    @slow
    def snake_case__ ( self : str )-> Tuple:
        """Encoding of 'Hello World!' matches the stored reference ids."""
        A__ = 'Hello World!'
        A__ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
        self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) )

    @slow
    def snake_case__ ( self : Optional[int] )-> str:
        """Encoding of a long text with odd characters and OOV words matches the
        stored reference ids (OOV pieces encode to 0)."""
        A__ = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        A__ = [
            1_0_8,
            2_6_5,
            2_4,
            1_1_1,
            4,
            2_5_8,
            1_5_6,
            3_5,
            2_8,
            2_7_5,
            3,
            2_5_9,
            2_9_7,
            2_6_0,
            8_4,
            4,
            3_5,
            1_1_0,
            4_4,
            8,
            2_5_9,
            9_1,
            2_6_8,
            2_1,
            1_1,
            2_0_9,
            2_7_4,
            1_0_9,
            2_6_6,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            3_1_5,
            2_5_8,
            2_7_8,
            2_5_8,
            2_7_7,
            2_5_8,
            0,
            2_5_8,
            2_8_8,
            2_5_8,
            3_1_9,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            2_8_7,
            2_5_8,
            3_1_5,
            2_5_8,
            2_8_9,
            2_5_8,
            2_7_8,
            9_9,
            2_6_9,
            2_6_6,
            2_6_2,
            8,
            2_5_9,
            2_4_1,
            4,
            2_1_7,
            2_3_0,
            2_6_8,
            2_6_6,
            5_5,
            1_6_8,
            1_0_6,
            7_5,
            1_9_3,
            2_6_6,
            2_2_3,
            2_7,
            4_9,
            2_6,
            2_8_2,
            2_5,
            2_6_4,
            2_9_9,
            1_9,
            2_6,
            0,
            2_5_8,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            1_7_6,
            1_8_3,
            2_7_0,
            1_1,
            2_6_2,
            4_2,
            6_1,
            2_6_5,
        ]
        self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) )

    @require_torch
    @slow
    def snake_case__ ( self : int )-> Any:
        """Tokenizer output feeds a (freshly-initialized, axially-adjusted)
        ReformerModel forward pass without error."""
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        A__ = ' '.join(lowercase_ )
        A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' )
        A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' )
        A__ = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        A__ = encoded_sequence['input_ids'].shape
        A__ = ReformerModel(lowercase_ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowercase_ )
            model(**lowercase_ )

    @slow
    def snake_case__ ( self : int )-> Tuple:
        """Integration check of full encodings (ids + attention masks) against the
        pinned crime-and-punishment checkpoint revision."""
        A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        A__ = [
            'This is a very simple sentence.',
            'The quick brown fox jumps over the lazy dog.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
| 7 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the XLM model family.  The scramble had assigned
# the dict and the two model lists to throwaway names while the _LazyModule
# call below read the undefined ``_import_structure`` (NameError on import).
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | """simple docstring"""
def lowerCAmelCase (n : int = 1_0_0_0 ):
    """Return the largest product a*b*c over Pythagorean triples with a+b+c == n.

    Returns -1 when no such triple exists.  The scrambled original bound the
    parameter and every local to ``__UpperCamelCase``, so ``n``, ``b`` and
    ``candidate`` were all undefined (NameError on any call).
    """
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # Fix: this module defines ``lowerCAmelCase``; the original printed the
    # undefined name ``solution`` and crashed with a NameError.
    print(f'''{lowerCAmelCase() = }''')
| 85 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    """Beam builder emitting three flat ``{"content": str}`` train examples.

    Scramble repairs: renamed from the duplicated ``__lowerCAmelCase`` to the
    name the tests below instantiate; the three hooks regain their ``datasets``
    builder API names (all were ``snake_case`` and shadowed each other, with
    duplicate ``_snake_case`` parameters — a SyntaxError).
    """

    def _info(self):
        # supervised_keys=None: this dataset exposes no (input, target) pairing.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset(datasets.BeamBasedBuilder ):
    """Beam builder emitting three nested ``{"a": {"b": [str]}}`` train examples.

    Same scramble repairs as ``DummyBeamDataset``: class renamed to the name
    the tests instantiate; hooks renamed to the ``datasets`` builder API.
    """

    def _info(self):
        # supervised_keys=None: no (input, target) pairing is exposed.
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    """(key, example) pairs with a flat string feature, consumed by ``DummyBeamDataset``.

    Renamed from ``_UpperCAmelCase``: two helpers shared that name (the second
    shadowed this one) while every call site uses ``get_test_dummy_examples``.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    """(key, example) pairs with a nested sequence feature, consumed by ``NestedBeamDataset``.

    Renamed from the duplicated ``_UpperCAmelCase`` to the name its call
    sites use.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class __lowerCAmelCase ( TestCase ):
    """End-to-end tests of the Beam-based builders defined above.

    Scramble repairs: the base class is the imported ``TestCase`` (the
    original referenced the undefined ``lowerCamelCase__``); every local that
    was collapsed into ``_lowerCAmelCase`` regains the name its later uses
    require (``builder``, ``dset``, ``expected_num_examples``, ...); the four
    tests get unique names instead of four ``snake_case`` duplicates.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                # Force two shards per split while delegating to the real writer.
                write_parquet_mock.side_effect = partial(original_write_to_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
            # NOTE(review): the scrambled original checked shard 00000 twice;
            # the second check clearly targets the other shard.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'{builder.name}-train-00001-of-00002.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            # Without a beam_runner, preparation must refuse to run.
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 82 |
from collections.abc import Iterable
from typing import Generic, TypeVar
# Type parameter for the queue; the scramble bound it to the unrelated name
# ``A__``, leaving ``_T`` undefined in ``Generic[_T]`` below.
_T = TypeVar("_T")


class __lowerCAmelCase ( Generic[_T] ):
    """FIFO queue implemented with two stacks (amortised O(1) put/get).

    Scramble repairs: both stacks had collapsed into a single ``_stacka``
    attribute — so the seed ``iterable`` was immediately overwritten by an
    empty list — and ``put``/``get`` were both defined under the name
    ``snake_case``, which made enqueueing unreachable.
    """

    def __init__( self , iterable = None ):
        # _stack1 receives new items; _stack2 holds items in pop (FIFO) order.
        self._stack1 = list(iterable or [] )
        self._stack2 = []

    def __len__( self ):
        return len(self._stack1 ) + len(self._stack2 )

    def __repr__( self ):
        return F'Queue({tuple(self._stack2[::-1] + self._stack1 )})'

    def put( self , item ):
        """Enqueue ``item``."""
        self._stack1.append(item )

    def get( self ):
        """Dequeue and return the oldest item; raise IndexError when empty."""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            # Refill the output stack by reversing the input stack.
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError("Queue is empty" )
        return self._stack2.pop()
if __name__ == "__main__":
    # Run this module's doctests when executed directly (no-op under import).
    from doctest import testmod
    testmod()
| 82 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)

# Root logger; must be named ``logger`` because the test class below calls
# ``logger.addHandler`` (the scramble had bound it to ``lowerCamelCase_``,
# leaving ``logger`` undefined).
logger = logging.getLogger()
def UpperCamelCase( ) -> str:
    """Return the value of the ``-f`` command-line flag (pytest passes it).

    Fixes the scramble that stored the parser under ``snake_case_`` and then
    read the undefined names ``parser`` and ``args``; the old ``List[str]``
    annotation also referenced an unimported name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class __lowerCamelCase ( TestCasePlus ):
    """Slow integration tests for the DeeBERT example scripts.

    Scramble repairs: the base class is the imported ``TestCasePlus``
    (``__snake_case`` was undefined); the three methods get distinct names —
    all were ``lowerCAmelCase_`` and shadowed one another, and
    ``run_and_check`` is the name the test body itself calls; locals and the
    parameter regain the names their uses require (``args``, ``result``,
    ``value``); ``patch.object`` patches ``sys.argv``; the stray ``| 34 |``
    text fused onto the final line is dropped.
    """

    def setup(self) -> None:
        # Mirror example-script logging to stdout so failures are visible.
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )

    def run_and_check(self, args) -> None:
        """Run run_glue_deebert with ``args`` and require every metric >= 0.666."""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self) -> None:
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args )

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args )

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args )
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def UpperCamelCase( lowercase_ = "" ) -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and return ``{title: rating}``.

    ``lowercase_`` optionally overrides the chart URL.  Fixes the scramble
    that read ``url`` before assigning it and collapsed every local into
    ``snake_case_`` (titles and ratings were both lost).  Network access.
    """
    url = lowercase_ or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    titles = soup.find_all("td" , attrs="titleColumn" )
    ratings = soup.find_all("td" , class_="ratingColumn imdbRating" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def UpperCamelCase( lowercase_ = "IMDb_Top_250_Movies.csv" ) -> None:
    """Write the scraped movie ratings to the CSV file ``lowercase_``.

    Fixes the scramble that passed the file *path* instead of the open handle
    to ``csv.writer`` and then used the undefined name ``writer``.
    """
    # NOTE(review): ``get_imdb_top_aaa_movies`` is not defined in this file —
    # it appears to target the scraper above; confirm the intended name.
    movies = get_imdb_top_aaa_movies()
    with open(lowercase_ , "w" , newline="" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["Movie title", "IMDb rating"] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    # NOTE(review): ``write_movies`` is not defined here; presumably the CSV
    # writer above — confirm.  (Stray separator text on this line removed.)
    write_movies()
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """CPU smoke tests for the ONNX Stable Diffusion x4 upscaler pipeline.

    Scramble repairs: the mixin base is the imported
    ``OnnxPipelineTesterMixin`` (``_lowerCAmelCase`` was undefined); the class
    attribute is ``hub_checkpoint`` because every test reads
    ``self.hub_checkpoint``; ``get_dummy_inputs`` is the name the tests call;
    the five test methods get unique names (all were ``a``, shadowing each
    other); locals regain the names their later uses require.
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs with a random 128x128 input image."""
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        # NOTE(review): upstream installs the scheduler on the pipe and passes
        # skip_prk_steps=True; the scramble lost both — confirm.
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX SD x4 upscaler.

    Scramble repairs: the two properties are ``gpu_provider`` and
    ``gpu_options`` because the tests read those attributes; the two tests get
    unique names (all methods were called ``a``); locals regain the names
    their later uses require.
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): upstream disables the memory-pattern optimisation here;
        # the scramble lost the attribute name — confirm ``enable_mem_pattern``.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((1_28, 1_28) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((1_28, 1_28) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 332 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy import structure; the scramble assigned this dict to ``_lowercase``
# while the _LazyModule call below read the undefined ``_import_structure``.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    # Module path and symbol follow the structure declared above (the original
    # pointed at a misspelled ``processing_wavaveca_with_lm`` module).
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# NOTE(review): both reference lists below are bound to the same name
# ``lowercase`` — the second assignment shadows the first — and the test
# functions further down read the undefined name ``a__`` instead.  They look
# like a (predictions, targets) pair for the ROUGE tests; confirm the
# intended names before fixing the functions.
lowercase = [
    """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
    """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
    """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
    """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
    """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
    """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
    """ body.""",
    """Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
    """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
    """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
    """ punishment.""",
]
lowercase = [
    """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
    """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
    """ had informed his Lufthansa training school of an episode of severe depression, airline says .""",
    """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
    """ Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
    """ Israelis .""",
    """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
    """ death . Organization claims that governments around the world are using the threat of terrorism to advance"""
    """ executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
    """ sentences up by 28% .""",
]
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): ``a__`` is undefined in this file — these calls look like
    # calculate_rouge(predictions, targets, bootstrap_aggregation=False, ...),
    # and the assignments below should bind ``no_aggregation`` /
    # ``no_aggregation_just_ra`` (the names the final assert reads).  All six
    # test functions in this chunk also share the name ``lowerCamelCase_``, so
    # only the last definition survives import — confirm intended names.
    UpperCamelCase__ = calculate_rouge(a__, a__, bootstrap_aggregation=a__, rouge_keys=['''rouge2''', '''rougeL'''] )
    assert isinstance(a__, a__ )
    UpperCamelCase__ = calculate_rouge(a__, a__, bootstrap_aggregation=a__, rouge_keys=['''rouge2'''] )
    assert (
        pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
    )
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): the assert reads ``score``/``score_no_sep`` and the calls
    # read ``k``, but every assignment targets ``UpperCamelCase__`` and the
    # arguments are the undefined ``a__`` — the locals were scrambled; confirm
    # intended names before repairing.
    UpperCamelCase__ = '''rougeLsum'''
    UpperCamelCase__ = calculate_rouge(a__, a__, newline_sep=a__, rouge_keys=[k] )[k]
    UpperCamelCase__ = calculate_rouge(a__, a__, newline_sep=a__, rouge_keys=[k] )[k]
    assert score > score_no_sep
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): the assert reads ``score_sep``/``score_no_sep`` but both
    # results are bound to ``UpperCamelCase__`` and the call arguments are the
    # undefined ``a__`` — scrambled locals; confirm intended names.
    UpperCamelCase__ = ['''rouge1''', '''rouge2''', '''rougeL''']
    UpperCamelCase__ = calculate_rouge(a__, a__, newline_sep=a__, rouge_keys=a__ )
    UpperCamelCase__ = calculate_rouge(a__, a__, newline_sep=a__, rouge_keys=a__ )
    assert score_sep == score_no_sep
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): both literal lists are bound to ``UpperCamelCase__`` (the
    # second shadows the first) and the final comparison passes the undefined
    # ``a__`` — the calls should compare ROUGE over these two lists with and
    # without newline separation; confirm intended names.
    UpperCamelCase__ = [
        '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
    ]
    UpperCamelCase__ = [
        '''Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
        ''' the final seconds on board Flight 9525.''',
    ]
    assert calculate_rouge(a__, a__, newline_sep=a__ ) == calculate_rouge(a__, a__, newline_sep=a__ )
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): the final assert reads ``new_score``/``prev_score`` yet
    # both calls are bound to ``UpperCamelCase__`` and take the undefined
    # ``a__`` — the two literal lists below were presumably the intended
    # (prediction, target) arguments; confirm.
    UpperCamelCase__ = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    UpperCamelCase__ = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    UpperCamelCase__ = calculate_rouge(a__, a__, rouge_keys=['''rougeLsum'''], newline_sep=a__ )['''rougeLsum''']
    UpperCamelCase__ = calculate_rouge(a__, a__, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
    assert new_score > prev_score
def lowerCamelCase_ ( ):
    '''simple docstring'''
    # NOTE(review): locals collapsed into ``UpperCamelCase__`` and the
    # isinstance checks pass the undefined ``a__``; the final source line also
    # carries stray separator text ("| 359 | from __future__ ...") fused onto
    # it, which breaks the syntax — confirm and strip.
    UpperCamelCase__ = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
    UpperCamelCase__ = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
    assert isinstance(a__, a__ )
    UpperCamelCase__ = calculate_rouge_path(
        data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=a__ )
    assert isinstance(a__, a__ ) | 359 | from __future__ import annotations
from typing import Any
def lowerCamelCase_ ( postfix_notation : list ):
    """Evaluate a postfix (RPN) expression of integer tokens; return 0 if empty.

    Division truncates toward zero: Python's ``//`` floors, so negative
    inexact quotients are corrected with ``+ 1``.

    Fixes the scramble that named the parameter ``UpperCamelCase__`` while the
    body read ``postfix_notation``, collapsed ``operations``/``stack``/``b``/
    ``a`` into single names, and converted the wrong variable with ``int``.
    """
    if not postfix_notation:
        return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack = []
    for token in postfix_notation:
        if token in operations:
            # The right-hand operand sits on top of the stack.
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 35 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config archive map are both
# bound to ``UpperCAmelCase__`` — the dict shadows the logger object.
# Upstream these are ``logger`` and ``ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP``;
# confirm the intended names.
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCAmelCase_ (a__ ):
    """RoBERTa model configuration (mirrors ``transformers.RobertaConfig``).

    Scramble repairs: every ``__init__`` parameter was named
    ``SCREAMING_SNAKE_CASE__`` — duplicate parameter names are a SyntaxError —
    and the body assigned locals instead of instance attributes; the
    ``Union``/``Tuple`` annotations referenced unimported names; the
    ``model_type`` class attribute had been renamed to a name-mangled
    identifier.  Parameter names/defaults follow the upstream RobertaConfig.
    """

    model_type = '''roberta'''

    def __init__(
        self,
        vocab_size=5_02_65,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase_ (a__ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes mapping for ``input_ids``/``attention_mask``.

        Fix: the axes dict was assigned to the throwaway
        ``SCREAMING_SNAKE_CASE__`` while the return read the undefined name
        ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 25 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger and the canonical config download map. Both were previously
# bound to the same name `A`, so the dict silently clobbered the logger.
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class lowerCamelCase (PretrainedConfig):
    """Configuration for the RetriBERT dense-retrieval model.

    Fixes: the base class referenced an undefined mangled name (restored to
    the `PretrainedConfig` imported at the top of the file), the `model_type`
    attribute had lost its name (the base class reads `model_type`), and all
    hyper-parameters were bound to throwaway locals instead of `self`.
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether query and document towers share one encoder.
        self.share_encoders = share_encoders
        # Output dimension of the retrieval projection head.
        self.projection_dim = projection_dim
| 118 | 0 |
"""Project Euler 42: count "triangle words" in words.txt (word value = sum of
letter positions; a triangle word's value is a triangular number t_n = n(n+1)/2)."""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Return how many words in the adjacent words.txt are triangle words.

    Fixes: the function was defined under a mangled name while the
    `__main__` guard calls `solution()`, and three distinct values
    (`__file__`, the words-file path, the loop letter) had all been replaced
    by one undefined placeholder name.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # File format: one line of comma-separated, double-quoted uppercase words.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]  # 'A' -> 1
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
| 220 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route DEBUG-level logs to stdout so subprocess output interleaves in CI.
# Fix: `logger` and `stream_handler` were referenced on the last line but the
# two objects had been bound to a single clobbered name (`A__`) -> NameError.
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase (TestCasePlus):
    """End-to-end tests that launch the RAG fine-tuning script in a subprocess.

    Fixes: all four methods were named `lowercase__` (each clobbering the
    previous) while the bodies call `self._create_dummy_data` and
    `self._run_finetune`; the base class was an undefined mangled name
    (restored to the imported `TestCasePlus`, which provides
    `get_auto_remove_tmp_dir`/`get_env`); and literal backslash-n sequences
    in the flattened CLI string leaked `\\n` tokens into argv.
    """

    def _create_dummy_data(self, data_dir):
        # Write tiny source/target files for each split so the script can run.
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        """Run finetune_rag.py end-to-end and return the parsed metrics.json."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        # NOTE(review): gpus=1 under a multi-GPU marker mirrors the original; confirm intent.
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 220 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Shared RNG used as the default by floats_list below; it was previously bound
# to a mangled name while the code references `global_rng`.
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Fixes: the function was defined under a mangled name although the rest of
    the file calls `floats_list`, and all four parameters shared one
    duplicated name (a SyntaxError). `name` is kept for signature
    compatibility with sibling helpers; it is unused here.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Config holder + input factory for the Speech2Text feature-extractor tests.

    Fixes: the class was defined under a mangled name although the test class
    below instantiates `SpeechaTextFeatureExtractionTester`, and `__init__`
    bound every hyper-parameter to a throwaway local instead of `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive sample lengths so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs dict used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of float inputs of equal or strictly increasing length."""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Unit tests for the Speech2Text feature extractor (fbank features + CMVN).

    Fixes: every method was named `_lowerCamelCase`, so only the last one
    survived class creation; distinct names are restored (inferred from the
    bodies and the mixin convention). The undefined base `a` is restored to
    the imported `SequenceFeatureExtractionTestMixin`, and locals that were
    collapsed to `_A` are renamed to the identifiers the code reads back.
    """

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # CMVN should leave each feature dimension with ~0 mean and ~1 variance.
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0]:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            # NOTE(review): index [0] below mirrors the original; presumably should be [1].
            self.assertTrue(input_features[0][fbank_feat_lengths[1]:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # float64 inputs must come back as float32 after padding.
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # Slice to the reference length (the original compared a 30-long slice
        # against a 24-entry array, which cannot broadcast).
        self.assertTrue(np.allclose(input_features[0, 0, : expected.shape[0]], expected, atol=1e-4))
| 11 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions (pairs i<j with arr[i] > arr[j]) by brute force, O(n^2).

    Fix: the function was defined under a mangled name while `main` calls
    `count_inversions_bf`, and two distinct values (the array and its length)
    shared one placeholder name.
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n).

    Returns:
        (sorted_copy, num_inversions) for `arr`.

    Fix: the mangled original clobbered the two recursive results and the
    cross-count into one placeholder name; the proper tuple unpacking is
    restored.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ) -> Optional[Any]:
__lowerCamelCase : List[str] = []
__lowerCamelCase : Optional[int] = 0
while i < len(UpperCAmelCase_ ) and j < len(UpperCAmelCase_ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(UpperCAmelCase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCAmelCase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def main():
    """Smoke-test both inversion counters against known answers.

    Fix: defined under a mangled name while the `__main__` guard calls
    `main()`; intermediate results are given their real names and the
    (array, count) tuple from the recursive counter is unpacked.
    """
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
| 185 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ (ProcessorMixin):
    """Processor bundling a Pix2Struct image processor with a T5 tokenizer.

    Fixes: the base class was an undefined mangled name (restored to the
    imported `ProcessorMixin`); the three class attributes were all named
    `_a` (each clobbering the previous) and are restored to the attribute
    names `ProcessorMixin` reads; `__init__` dropped its tokenizer
    configuration into a throwaway local; and the renamed-key `pop` results
    in `__call__` were discarded instead of being stored under the decoder
    keys.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct's text branch never uses token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        max_patches=2048,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Encode images and/or text; text-only input short-circuits to the tokenizer."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox; VQA renders the question into the image header
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Text targets feed the decoder, so rename the keys accordingly.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 102 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """Transformer block: self-attention, optional cross-attention, feed-forward.

    Fixes: all eight classes in this module were named `a__`, each definition
    clobbering the previous; names are restored from the in-file references
    (`AdaLayerNorm`, `AdaLayerNormZero`, `FeedForward` are called below by
    those names). The digit-collapsed attributes `norma`/`attna` are split
    back into `norm1/norm2/norm3` and `attn1/attn2` using the numbered
    section comments as ground truth.
    """

    def __init__(
        self,
        dim,
        num_attention_heads,
        attention_head_dim,
        dropout=0.0,
        cross_attention_dim=None,
        activation_fn="geglu",
        num_embeds_ada_norm=None,
        attention_bias=False,
        only_cross_attention=False,
        double_self_attention=False,
        upcast_attention=False,
        norm_elementwise_affine=True,
        norm_type="layer_norm",
        final_dropout=False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."""
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        """Enable memory-saving chunked feed-forward along `dim`."""
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."""
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    """Feed-forward layer: activation projection -> dropout -> linear out.

    Class renamed from the clobbering placeholder `a__` to the name
    `BasicTransformerBlock` instantiates (`FeedForward`).
    """

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        # NOTE(review): the first two branches are `if`/`if` (not elif) in the
        # original; harmless since the string values are mutually exclusive.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """Linear projection followed by GELU (exact or tanh-approximate).

    Class renamed from the clobbering placeholder `a__` to the name
    `FeedForward` instantiates (`GELU`).
    """

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """Gated GELU: projects to 2x width, gates one half with GELU of the other.

    Class renamed from the clobbering placeholder `a__` to the name
    `FeedForward` instantiates (`GEGLU`).
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x) after projection.

    Class renamed from the clobbering placeholder `a__` to the name
    `FeedForward` instantiates (`ApproximateGELU`).
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """Timestep-adaptive LayerNorm: scale/shift are produced from a timestep embedding.

    Class renamed from the clobbering placeholder `a__` to the name
    `BasicTransformerBlock` instantiates (`AdaLayerNorm`).
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """AdaLN-Zero: class/timestep embedding emits 6 modulation chunks.

    Class renamed from the clobbering placeholder `a__` to the name
    `BasicTransformerBlock` instantiates (`AdaLayerNormZero`). The mangled
    `bias=_A` / `elementwise_affine=_A` placeholders are restored to
    `bias=True` / `elementwise_affine=False` — presumably the upstream
    values; confirm against the diffusers source.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """GroupNorm whose scale/shift come from a conditioning embedding.

    Class renamed from the clobbering placeholder `a__`; the name follows the
    diffusers module this code mirrors.
    """

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # Broadcast the per-channel scale/shift over spatial dims.
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 102 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _A (unittest.TestCase):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint pipeline.

    Fixes: method names were mangled although `self.gpu_provider` and
    `self.gpu_options` are referenced inside the test; `gpu_options` bound
    its flag to a throwaway local instead of the session options; and the
    undefined sentinel passed for `safety_checker`/`feature_extractor`/
    `disable` is restored to `None`.
    """

    @property
    def gpu_provider(self):
        # CUDA execution provider with a capped 15 GB arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 40 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
# Module-level logger; the FSDP save/load helpers below read it as `logger`,
# so the obfuscated `UpperCAmelCase_` binding left `logger` undefined.
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's weights under `output_dir`.

    Layout depends on `fsdp_plugin.state_dict_type`:
    - FULL_STATE_DICT:   rank 0 writes a single gathered .bin file,
    - LOCAL_STATE_DICT:  every rank writes its own shard file,
    - SHARDED_STATE_DICT: a torch.distributed.checkpoint directory is written.

    The original signature repeated `_A` (SyntaxError) and the four helpers in
    this file shadowed each other under one name; names restored from usage.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Only the main process holds the gathered full state dict.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load FSDP model weights from `input_dir`, mirroring `save_fsdp_model`'s layouts.

    For FULL_STATE_DICT only rank 0 loads (others rely on `sync_module_states`
    to broadcast); LOCAL loads per-rank shard files; SHARDED reads a
    torch.distributed.checkpoint directory in place.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Accept either the parent dir or a path already pointing at the shard dir.
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save an FSDP optimizer state dict under `output_dir`.

    FULL_STATE_DICT writes a single .bin from rank 0; every other state-dict
    type goes through a torch.distributed.checkpoint directory.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load an FSDP optimizer state dict saved by `save_fsdp_optimizer`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            # Accept either the parent dir or a path already pointing at the shard dir.
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 366 |
def remove_duplicates(key: str) -> str:
    """Return `key` with repeated letters removed (spaces are kept as-is).

    Only the first occurrence of each alphabetic character survives; the name
    is restored from the call at `create_cipher_map`, and the accumulator the
    original read (`key_no_dups`) was never defined.
    """
    key_no_dups = ""
    for ch in key:
        # Keep every space; keep a letter only on its first appearance.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher substitution map for the 26 uppercase letters.

    The deduplicated keyword fills the first positions; remaining alphabet
    letters are shifted in, skipping letters already used by the keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher `message` (upper-cased) through `cipher_map`; unmapped chars pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Invert `cipher_map` and decode `message` (upper-cased); unmapped chars pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactively encipher or decipher a message with a keyword cipher."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        # Dispatch on the first letter of the user's choice.
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 198 | 0 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps sub-module name -> exported names; consumed by _LazyModule below.  The
# original passed an undefined `_import_structure` and discarded the lazy
# module instead of installing it in sys.modules — both restored here.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module stays lazy.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attributes import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 214 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if `input_str` contains every letter a-z at least once.

    The original body read an undefined `frequency` set and returned
    `len(input_str) == 26`; both restored, and the name is taken from the
    `benchmark` setup string that imports `is_pangram`.
    """
    frequency = set()
    input_str = input_str.replace(" ", "")  # spaces are never letters
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a 26-slot boolean table (one slot per letter).

    The original overwrote the whole flag list with `True` instead of marking
    the matching index, then returned `all(input_str)` (always True for any
    non-empty string); the per-letter indexing is restored.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a set comprehension over the lower-cased letters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations (only meaningful when run as a script,
    since the timeit setup imports them from __main__)."""
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 214 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the sample rate /
# sample size each model was trained with.  The dict is read as `MODELS_MAP`
# by `download` and `main`, but the original bound it to a different name.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Map a (clean-signal scale, noise scale) pair to a timestep in [0, 1].

    Uses `torch.atan2(sigma, alpha)` normalized so alpha=1 -> t=0 and
    sigma=1 -> t=1.  The original called the nonexistent `torch.atana` and
    repeated its parameter name (SyntaxError); the name is restored from the
    call in `get_crash_schedule`.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Convert a linear schedule `t` to the "crash" schedule used for sampling.

    sigma follows sin^2, alpha keeps alpha^2 + sigma^2 on the unit circle;
    name restored from the call in `main`.
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object:
    """Bare attribute container used as an ad-hoc config namespace in `main`.

    Restored from the `Object()` call in `main`; the original inherited from
    an undefined name.
    """
    pass
class DiffusionUncond(nn.Module):
    """Wrapper matching the original audio-diffusion checkpoint layout.

    Name restored from the `DiffusionUncond(config)` call in `main`.  The
    obfuscated __init__ bound its sub-modules to locals, losing the
    `self.diffusion_ema` attribute that `main` reads.
    """

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        # EMA copy — this is the set of weights `main` actually converts.
        self.diffusion_ema = deepcopy(self.diffusion)
        # NOTE(review): upstream passes scramble=True; the obfuscated code passed
        # the constructor argument here — confirm against the original script.
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch the checkpoint for `model_name` into the working directory via wget.

    Returns the local path of the downloaded .ckpt; name restored from the
    call in `main`.
    """
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
# Layer-index -> diffusers submodule-name tables consumed by `rename` below.
# The original bound all six dicts to one shadowed name, losing the first five;
# names are restored from the reads in `rename`, `convert_resconv_naming`
# and `convert_attn_naming`.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Translate a ResConvBlock parameter name to its diffusers equivalent.

    `name` must be either "skip..." or "main.<digit>..."; anything else raises.
    Name restored from the call in `rename`.
    """
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Translate an attention-block parameter name via ATTN_MAP.

    Returns a single string, or a list of strings for qkv-style projections
    that fan out into several diffusers parameters.  Names restored from the
    call in `rename` and the loop variables the original left undefined.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Map one original checkpoint parameter name to its diffusers UNet1D name.

    Strips the `net.` / `main.7.` nesting prefixes while counting `depth`,
    then dispatches on the numeric layer index via the *_NUM_TO_LAYER tables.
    The original repeated its parameter name (SyntaxError) and shadowed all
    helpers under one identifier; names are restored from the calls in
    `rename_orig_weights` and the error message below.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        # May fan out into several names (qkv projections).
        string_left = convert_attn_naming(string_left)
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename every entry of the original state dict to diffusers conventions.

    Skips `*kernel` entries (up/downsample layers without trainable weights)
    and splits fused qkv conv weights into per-projection linear weights.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Write attention weights `v` into `new_state_dict` under the name(s) `new_k`.

    A 1x1-conv weight (3-D) is squeezed to a linear weight; a fused qkv tensor
    (three stacked projections) is split into thirds, one per target name.
    The original repeated its parameter name three times (SyntaxError).
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert an original dance-diffusion checkpoint to a diffusers pipeline.

    Downloads the checkpoint if needed, renames its state dict to the
    diffusers UNet1D layout, then sanity-checks the converted pipeline against
    the original sampler before optionally saving.  Name restored from the
    `main(args)` call in the __main__ guard.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    # Every renamed key must exist in the diffusers model; only parameter-free
    # `*kernel` entries may be missing on the diffusers side.
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)

    # Compare one deterministic sampling run in both frameworks.
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase_ = parser.parse_args()
main(args)
| 246 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-oriented `source_data` into per-column lists of floats.

    Name restored from the call in `procentual_proximity`; the original read
    an undefined accumulator.
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily as wider rows appear.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 inverts the score (1 - normalized).

    Raises ValueError for any weight other than 0 or 1.  Constant columns
    (max == min) score 1 for weight 0 and 0 for weight 1.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores element-wise into one score per original row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of `source_data` by weighted min-max proximity.

    Mutates `source_data` in place, appending the combined score to every
    row, and also returns it.  Helper names restored from the calls the
    original body already made.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 246 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# The original bound both values to one shadowed name, so the logger was
# immediately clobbered by the archive map; conventional HF names restored.
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _lowerCamelCase(_a):
    """RoBERTa model configuration.

    The original `__init__` repeated the parameter name `lowerCamelCase` for
    every argument (a SyntaxError); parameter names are restored from the
    attributes the body assigns and the canonical default values.
    """

    # Model-family identifier consumed by the config machinery.
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCamelCase( _a ):
    @property
    def UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export: batch/sequence always vary,
        plus a choice axis for the multiple-choice task."""
        if self.task == "multiple-choice":
            axes = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            axes = {0: 'batch', 1: 'sequence'}
        return OrderedDict((name, axes) for name in ('input_ids', 'attention_mask'))
| 21 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return `x` unchanged if it is already iterable, else duplicate it into a 2-tuple.

    Name restored from the `to_atuple(...)` calls in the attention-shape
    checks below, which the obfuscated def name broke.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class _lowerCamelCase:
def get_vision_text_model(self, config, text_config):
    """Hook for concrete test classes: build (vision_model, text_model) from configs.

    The original repeated the parameter name (SyntaxError); names restored
    from how the sibling check_* methods call this hook.
    """
    pass
def prepare_config_and_inputs(self):
    """Hook for concrete test classes: produce model configs and dummy inputs."""
    pass
def get_pretrained_model_and_inputs(self):
    """Hook for concrete test classes: load a pretrained model plus matching inputs."""
    pass
def assert_almost_equals(self, a, b, tol):
    """Fail the test if the max element-wise difference of `a` and `b` exceeds `tol`.

    The original repeated `lowerCamelCase` three times (SyntaxError);
    parameter names restored from the body's `np.abs((a - b))` and the
    f-string below.
    """
    diff = np.abs((a - b)).max()
    self.assertLessEqual(diff, tol, f'''Difference between torch and flax is {diff} (>= {tol}).''')
def check_model_from_pretrained_configs(
    self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
    """Build a dual-encoder from raw configs and check both embedding output shapes."""
    config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
    model = FlaxVisionTextDualEncoderModel(config)
    output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
    self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
    self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def check_vision_text_dual_encoder_model(
    self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
    """Build a dual-encoder from two pretrained sub-models and check output shapes."""
    vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
    model_kwargs = {'vision_model': vision_model, 'text_model': text_model}
    model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)
    output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
    self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
    self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase)
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : Tuple = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : str = after_output[0]
_lowercase : Optional[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
    """ViT + BERT concrete case of the dual-encoder test mixin.

    NOTE(review): names are machine-obfuscated; bodies assign to `_lowercase`
    but read the original variables (`batch_size`, `pixel_values`, ...).
    """
    def UpperCamelCase ( self) -> List[str]:
        """Build tiny pretrained ViT/BERT dual encoder and random inputs."""
        _lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        _lowercase : List[Any] = 13
        _lowercase : str = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        _lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        _lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
        _lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
        """Instantiate the (FlaxViTModel, FlaxBertModel) tower pair."""
        _lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
        _lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
        return vision_model, text_model
    def UpperCamelCase ( self) -> str:
        """Combine ViT and BERT tester configs/inputs into one kwargs dict."""
        _lowercase : List[Any] = FlaxViTModelTester(self)
        _lowercase : Any = FlaxBertModelTester(self)
        _lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
        _lowercase : Any = bert_model_tester.prepare_config_and_inputs()
        _lowercase , _lowercase : List[str] = vision_config_and_inputs
        _lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
    """CLIP-vision + BERT concrete case of the dual-encoder test mixin.

    NOTE(review): decorated `@require_torch` although the sibling ViT case
    uses `@require_flax` — presumably because vision weights are loaded
    from PyTorch here; confirm against the original file.
    """
    def UpperCamelCase ( self) -> Tuple:
        """Build tiny pretrained CLIP/BERT dual encoder and random inputs."""
        _lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        _lowercase : Tuple = 13
        _lowercase : Any = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        _lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        _lowercase : Any = random_attention_mask([batch_size, 4])
        _lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
        """Instantiate the (FlaxCLIPVisionModel, FlaxBertModel) tower pair."""
        _lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
        _lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
        return vision_model, text_model
    def UpperCamelCase ( self) -> Dict:
        """Combine CLIP-vision and BERT tester configs/inputs into one kwargs dict."""
        _lowercase : Tuple = FlaxCLIPVisionModelTester(self)
        _lowercase : Union[str, Any] = FlaxBertModelTester(self)
        _lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
        _lowercase : str = bert_model_tester.prepare_config_and_inputs()
        _lowercase , _lowercase : Dict = vision_config_and_inputs
        _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
    """Slow integration test against the public `clip-italian` checkpoint."""
    @slow
    def UpperCamelCase ( self) -> Optional[int]:
        """Verify logits shapes and pinned image-logit values on a COCO sample."""
        _lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        _lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        _lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        _lowercase : List[Any] = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
        _lowercase : List[Any] = model(**lowerCamelCase)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        # Reference values recorded from a known-good run of this checkpoint.
        _lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
| 21 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
# Help text shown for the `accelerate config default` subcommand.
_A = 'Create a default config file for Accelerate with only a few flags set.'
def UpperCAmelCase ( mixed_precision="no", save_location = default_json_config_file, use_xpu = False ):
    '''Write a minimal default Accelerate `ClusterConfig` JSON file.

    Fix(review): the obfuscated original bound every result to the throwaway
    name `lowerCamelCase` while subsequent lines read `path`, `mixed_precision`,
    `config`, `num_gpus`, ... (NameErrors), and repeated the parameter name
    `a_` three times (SyntaxError). Coherent names are restored here.

    Args:
        mixed_precision: one of "no", "fp16", "bf16" or "fp8" (case-insensitive).
        save_location: path of the JSON config file to write.
        use_xpu: prefer XPU devices when available (NPU needs no flag).

    Returns:
        The path the config was written to, or ``False`` if a config already
        exists at `save_location` (it is never overwritten).

    Raises:
        ValueError: if `mixed_precision` is not a recognised choice.
    '''
    path = Path(save_location )
    path.parent.mkdir(parents=True, exist_ok=True )
    if path.exists():
        print(
            F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Pick the accelerator type by availability: CUDA first, then XPU
    # (opt-in via `use_xpu`), then NPU, else CPU-only.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        # No accelerator found: single CPU process.
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def UpperCAmelCase ( parser, parents ):
    '''Register the `default` subcommand and its flags on `parser`.

    Fix(review): the obfuscated original repeated the parameter name `a_`
    (SyntaxError) and bound the subparser to a throwaway name while adding
    arguments to `parser` — restored so the flags land on the subparser.

    Args:
        parser: the subparsers action to attach the `default` subcommand to.
        parents: parent parsers whose arguments the subcommand inherits.

    Returns:
        The configured subcommand parser.
    '''
    # `_A` is this module's description constant for the subcommand help.
    parser = parser.add_parser('default', parents=parents, help=_A, formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file', default=default_json_config_file, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), dest='save_location', )
    parser.add_argument(
        '--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no', )
    # In this obfuscated module the command handler is the last function bound
    # to the module-level name `UpperCAmelCase` — presumably `default_config_command`
    # in the original; verify against the unobfuscated source.
    parser.set_defaults(func=UpperCAmelCase )
    return parser
def UpperCAmelCase ( args ):
    '''Entry point for `accelerate config default`: write the config, report its path.

    Fix(review): the result was bound to the throwaway name `lowerCamelCase`
    while the `if` below read `config_file` (NameError); restored.

    Args:
        args: parsed CLI namespace with `mixed_precision` and `save_location`.
    '''
    # NOTE(review): `write_basic_config` is the intended helper; in this
    # obfuscated file it was renamed, so restore the original module to run.
    config_file = write_basic_config(args.mixed_precision, args.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
| 205 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# NOTE(review): the obfuscation collapsed two distinct module constants onto
# the same name `_A` — the logger below is immediately clobbered by the
# pretrained-config archive map. In the original these were presumably
# `logger` and `TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
_A = logging.get_logger(__name__)
_A = {
    'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
        'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowercase ( PretrainedConfig ):
    """Configuration for a TrajectoryTransformer model.

    Fix(review): restored from the machine-obfuscated original, in which the
    base class was the undefined name `__UpperCAmelCase` (the import at the
    top of this section provides `PretrainedConfig`), the three class
    attributes were all collapsed onto `lowercase_`, and every `self.<attr>`
    assignment in `__init__` was replaced by an annotated throwaway local —
    leaving the config object without any of its attributes.
    """
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ) -> None:
        """Store all hyperparameters and forward token ids to the base config."""
        # NOTE(review): parameter names/order inferred from the surviving
        # right-hand-side names in the obfuscated body; defaults kept verbatim.
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 205 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class SCREAMING_SNAKE_CASE__ :
    """Mixin of serialization round-trip tests shared by feature-extractor suites.

    Fix(review): restored locals from the machine-obfuscated original, where
    results were bound to `a_` while the next lines read `feat_extract`,
    `value`, `tmpdirname`, ... (NameErrors). The obfuscation also collapsed
    all four test methods onto the single name `SCREAMING_SNAKE_CASE`, so at
    class creation only the last definition survives — restore the original
    `test_*` names before relying on these checks.
    """
    # Set by concrete subclasses to the feature-extractor class under test
    # (the methods read `self.feature_extraction_class`; the obfuscated
    # original had collapsed this attribute onto `snake_case__`).
    feature_extraction_class = None

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        """`to_json_string` must round-trip every constructor kwarg."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
        """`to_json_file` / `from_json_file` must preserve the full config dict."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : int ) -> int:
        """`save_pretrained` / `from_pretrained` must preserve the full config dict."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        """The extractor must be constructible with no arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 32 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# NOTE(review): the obfuscation collapsed four distinct module constants onto
# the single name `a` (each assignment clobbers the previous one), yet the
# tokenizer class below reads the original names `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` —
# presumably what these were called; restore before use.
a : Dict = logging.get_logger(__name__)
a : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
a : Tuple = {
    'vocab_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
    },
    'added_tokens.json': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
    },
    'merges_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
    },
}
# Maximum model input sizes, keyed by checkpoint name.
a : Optional[int] = {
    'RUCAIBox/mvp': 1_024,
}
class _a ( _lowerCAmelCase ):
    """Fast MVP tokenizer (byte-level BPE backed by the `tokenizers` library).

    NOTE(review): machine-obfuscated. All five class attributes were collapsed
    onto the single name `A` (each clobbers the previous — originally
    `vocab_files_names`, `pretrained_vocab_files_map`, `max_model_input_sizes`,
    `model_input_names`, `slow_tokenizer_class`), several locals are assigned
    to `UpperCAmelCase_` while later lines read the original names, and the
    property below is named `__snake_case` so `@mask_token.setter` raises at
    class-body execution. Restore the original names before use.
    """
    A = VOCAB_FILES_NAMES
    A = PRETRAINED_VOCAB_FILES_MAP
    A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A = ['''input_ids''', '''attention_mask''']
    A = MvpTokenizer
    def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
        """Initialize the fast tokenizer and sync `add_prefix_space` / `trim_offsets`
        into the backend pre-tokenizer and post-processor components."""
        super().__init__(
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
        # Rebuild the pre-tokenizer if its stored add_prefix_space disagrees
        # with the requested one. The throwaway assignments below were
        # presumably `pre_tok_state`, `pre_tok_class`, then writes into the
        # state dict and onto `self` — verify against the original source.
        UpperCAmelCase_: str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
            UpperCAmelCase_: str = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop("""type""" ) )
            UpperCAmelCase_: Dict = add_prefix_space
            UpperCAmelCase_: List[str] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: Dict = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase_: Optional[int] = """post_processor"""
        UpperCAmelCase_: Any = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        if tokenizer_component_instance:
            UpperCAmelCase_: Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase_: Optional[int] = tuple(state["""sep"""] )
            if "cls" in state:
                UpperCAmelCase_: int = tuple(state["""cls"""] )
            UpperCAmelCase_: Any = False
            # Rebuild the post-processor only if one of its stored options
            # differs from what was requested.
            if state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
                UpperCAmelCase_: Tuple = add_prefix_space
                UpperCAmelCase_: Union[str, Any] = True
            if state.get("""trim_offsets""", SCREAMING_SNAKE_CASE_ ) != trim_offsets:
                UpperCAmelCase_: Optional[Any] = trim_offsets
                UpperCAmelCase_: Dict = True
            if changes_to_apply:
                UpperCAmelCase_: Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop("""type""" ) )
                UpperCAmelCase_: Dict = component_class(**SCREAMING_SNAKE_CASE_ )
                setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
    @property
    def __snake_case (self ) -> str:
        """Return the mask token string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        """Set the mask token; strings are wrapped in a lstrip-ing AddedToken."""
        UpperCAmelCase_: List[Any] = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
        UpperCAmelCase_: str = value
    def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        UpperCAmelCase_: int = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
        """Encode a single example; pretokenized input requires add_prefix_space=True."""
        UpperCAmelCase_: Union[str, Any] = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        """Save the backend vocabulary files; returns the written file names."""
        UpperCAmelCase_: Any = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
        return tuple(SCREAMING_SNAKE_CASE_ )
    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> int:
        """Build `<s> A </s>` or `<s> A </s> </s> B </s>` input sequences."""
        UpperCAmelCase_: Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        """Return all-zero token-type ids (MVP does not use token types)."""
        UpperCAmelCase_: Dict = [self.sep_token_id]
        UpperCAmelCase_: int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 147 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowercase__ = logging.get_logger(__name__)
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , **UpperCamelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
snake_case : Optional[Any] = feature_size
snake_case : Dict = sampling_rate
snake_case : List[Any] = padding_value
snake_case : Dict = kwargs.pop('''padding_side''' , '''right''' )
snake_case : Tuple = kwargs.pop('''return_attention_mask''' , UpperCamelCase__ )
super().__init__(**UpperCamelCase__ )
def lowerCAmelCase ( self : Any , UpperCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
"""simple docstring"""
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
snake_case : Any = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
snake_case : Any = processed_features[self.model_input_names[0]]
snake_case : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCamelCase__ ) == 0:
if return_attention_mask:
snake_case : Any = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
snake_case : int = required_input[0]
if isinstance(UpperCamelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
snake_case : int = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCamelCase__ ):
snake_case : Any = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCamelCase__ ):
snake_case : Tuple = '''tf'''
elif is_torch_tensor(UpperCamelCase__ ):
snake_case : Tuple = '''pt'''
elif isinstance(UpperCamelCase__ , (int, float, list, tuple, np.ndarray) ):
snake_case : Any = '''np'''
else:
raise ValueError(
f'type of {first_element} unknown: {type(UpperCamelCase__ )}. '
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
snake_case : Dict = to_numpy(UpperCamelCase__ )
else:
snake_case : int = [to_numpy(UpperCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
snake_case : Tuple = self._get_padding_strategies(padding=UpperCamelCase__ , max_length=UpperCamelCase__ )
snake_case : Any = processed_features[self.model_input_names[0]]
snake_case : Dict = len(UpperCamelCase__ )
if not all(len(UpperCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
snake_case : Union[str, Any] = []
for i in range(UpperCamelCase__ ):
snake_case : Dict = {k: v[i] for k, v in processed_features.items()}
# truncation
snake_case : int = self._truncate(
UpperCamelCase__ , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
truncated_inputs.append(UpperCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
snake_case : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
snake_case : int = PaddingStrategy.MAX_LENGTH
snake_case : int = {}
for i in range(UpperCamelCase__ ):
# padding
snake_case : Optional[Any] = self._pad(
truncated_inputs[i] , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
snake_case : str = []
if value.dtype is np.dtype(np.floataa ):
snake_case : Dict = value.astype(np.floataa )
batch_outputs[key].append(UpperCamelCase__ )
return BatchFeature(UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def lowerCAmelCase ( self : str , UpperCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ) -> dict:
"""simple docstring"""
snake_case : Optional[int] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
snake_case : Tuple = len(UpperCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
snake_case : Optional[int] = np.ones(len(UpperCamelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
snake_case : Union[str, Any] = max_length - len(UpperCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
snake_case : str = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
snake_case : Tuple = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
snake_case : Optional[int] = np.pad(
UpperCamelCase__ , UpperCamelCase__ , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
snake_case : Optional[int] = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
snake_case : int = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
snake_case : int = np.pad(
UpperCamelCase__ , UpperCamelCase__ , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def _truncate(
    self,
    processed_features: Union[Dict[str, np.ndarray], BatchFeature],
    max_length: Optional[int] = None,
    pad_to_multiple_of: Optional[int] = None,
    truncation: Optional[bool] = None,
):
    """Truncate a single example's inputs (and attention mask) to ``max_length``.

    Parameter names are restored from the body's references (the scrambled
    source gave every parameter the same name, which is a SyntaxError).
    """
    if not truncation:
        return processed_features
    elif truncation and max_length is None:
        raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

    required_input = processed_features[self.model_input_names[0]]

    # find `max_length` that fits `pad_to_multiple_of`
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

    needs_to_be_truncated = len(required_input) > max_length

    if needs_to_be_truncated:
        processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
        if "attention_mask" in processed_features:
            # Keep the mask aligned with the truncated inputs.
            processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

    return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
    """Resolve the user-facing ``padding`` argument into a ``PaddingStrategy`` member.

    Fix: the scrambled source tested ``isinstance(padding, padding)`` (a
    runtime TypeError); the second argument must be the ``PaddingStrategy``
    enum, which is how the surrounding branches use it.
    """
    if padding is not False:
        if padding is True:
            padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
        elif not isinstance(padding, PaddingStrategy):
            # A plain string such as "max_length" is coerced into the enum.
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD

    # Set max length if needed
    if max_length is None:
        if padding_strategy == PaddingStrategy.MAX_LENGTH:
            raise ValueError(
                f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
            )

    # Test if we have a padding value
    if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
        raise ValueError(
            "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
            " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
        )

    return padding_strategy
| 83 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds a tiny RegNet config plus random pixel inputs for the Flax model tests.

    Class and method names are restored from their call sites in the test
    class below (``FlaxRegNetModelTester(self)``, ``prepare_config_and_inputs``,
    ``create_and_check_model``, ...); the scrambled source gave every method
    the same name, so only the last one survived.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — never mutated, mirrors upstream style
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # Used by test_hidden_states_output via self.model_tester.num_stages.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): the backbone downsamples the input 32x.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax RegNet model tests, driven by the common FlaxModelTesterMixin.

    The base class is restored from the ``FlaxModelTesterMixin`` import at the
    top of the file (the scrambled source referenced an undefined name), and
    method names are restored so each test actually runs (duplicates shadowed
    each other before).
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # NOTE(review): the three flag names below were lost by the scrambling and
    # are restored from the upstream RegNet Flax test — confirm against the mixin.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config property checks do not apply to RegNet.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # One hidden state per stage, plus the stem output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # Jitted and eager executions must agree in arity and shapes.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test below.

    Fixes two defects: the function returned an undefined name ``image``
    (NameError), and its name is restored to ``prepare_img`` — the name the
    integration test actually calls.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/regnet-y-040 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Name restored from the ``self.default_image_processor`` use below.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Pipeline tests for the ``audio-classification`` task.

    Restores the mapping-attribute and method names the pipeline test mixin
    discovers (the scrambled source gave duplicates, so only one survived),
    fixes the garbled ``np.floataa`` dtype, and removes the ``| 152 |``
    residue that made the last line a syntax error.
    """

    # The common pipeline-test machinery reads these two mappings.
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance and two raw-waveform examples for it."""
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        """Check the output schema on raw numpy waveforms."""
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        """Re-run the pipeline on a real dataset sample (needs torchaudio decoding)."""
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        # Two acceptable orderings: scores are near-ties for a random tiny model.
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        # Fix: dtype was garbled to the nonexistent ``np.floataa``.
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
'''simple docstring'''
import socket


def main():
    """Connect to a local file server, request a file, and save it as ``Received_file``.

    Fixes: the function was named ``_a`` while the ``__main__`` guard called
    ``main()`` (NameError), and dataset residue (``| 152 | 1 |``) fused onto
    the last line made it a syntax error.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
import os

# Precomputes a list of the 100 first triangular numbers: T(n) = n * (n + 1) / 2.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the triangle words in ``words.txt`` (Project Euler problem 42).

    A word's value is the sum of the alphabetical positions of its letters
    (A=1, ..., Z=26); the word is a triangle word when that value is a
    triangular number.

    Fixes: the scrambled source used one undefined name both as ``__file__``
    and as the loop variable, and the constant/function names did not match
    their use sites (``TRIANGULAR_NUMBERS`` and the ``solution()`` call below).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        line = f.readline()
    words = [word.strip("\"") for word in line.strip("\r\n").split(",")]
    triangle_words = [
        value
        for value in [sum(ord(ch) - 64 for ch in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution())
| 286 |
__lowerCamelCase : Optional[int] = """Tobias Carryer"""
from time import time
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : List[Any] , __A : Optional[int] , __A : List[str] , __A : Dict=int(time() ) ): # noqa: B008
snake_case__ : List[Any] = multiplier
snake_case__ : Optional[int] = increment
snake_case__ : Optional[int] = modulo
snake_case__ : Union[str, Any] = seed
def _lowercase ( self : str ):
snake_case__ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__lowerCamelCase : int = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 286 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class _snake_case(PreTrainedModel):
    """CLIP-based safety checker that blanks out NSFW / watermarked images.

    Fixes: the module logger was assigned to a different name than the one
    the class uses (``logger`` was a NameError), the base class was an
    undefined name (restored from the ``PreTrainedModel`` import), and the
    attribute names are restored from their use sites (``self.vision_model``,
    ``self.p_head``, ``self.w_head``).
    """

    config_class = CLIPConfig
    # NOTE(review): attribute name restored from the upstream safety checker —
    # confirm this is the intended ``_no_split_modules`` list.
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW score head
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark score head

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Score images and replace flagged ones with black (all-zero) images."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the XGLM sub-package.
# Fix: the scrambled source kept reassigning a single module-level name, so
# the optional-dependency branches overwrote (instead of extending) the
# structure, and the final ``_LazyModule`` call referenced the undefined
# ``_import_structure``. Restored to the standard transformers pattern.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_:
    """RAG tokenizer: wraps a question-encoder tokenizer and a generator tokenizer.

    Method names are restored (every method had the same scrambled name, so
    only the last one survived): ``save_pretrained``, ``from_pretrained``,
    ``batch_decode``, ``decode``, ``_switch_to_input_mode``,
    ``_switch_to_target_mode``, and ``prepare_seq2seq_batch``.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Input (question) mode is active by default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers into dedicated sub-folders of ``save_directory``."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate both sub-tokenizers from a pretrained RAG checkpoint."""
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Delegate to whichever tokenizer is currently active.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        """Deprecated helper that tokenizes source and (optionally) target texts."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 357 |
'''simple docstring'''
import string


def decrypt(message):
    """Brute-force a Caesar cipher: print the plaintext candidate for every key.

    Only uppercase ASCII letters are shifted; other characters pass through.
    Function names are restored from their call sites (``decrypt`` and
    ``main`` were both called but never defined — the two functions shared
    one scrambled name).
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # Wrap around the alphabet.
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main():
    """Prompt for a ciphertext and print every possible decryption."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 52 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the PLBart sub-package.
# Fix: all module-level assignments shared one scrambled name (``A``), so the
# optional-dependency branches overwrote the structure instead of extending
# it, and ``_import_structure`` was referenced but never defined.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 34 |
'''simple docstring'''
import os

try:
    from distutils.util import strtobool
except ImportError:  # ``distutils`` was removed from the stdlib in Python 3.12 (PEP 632).
    def strtobool(val):
        """Minimal drop-in replacement for ``distutils.util.strtobool``."""
        val = val.lower()
        if val in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if val in ("n", "no", "f", "false", "off", "0"):
            return 0
        raise ValueError(f"invalid truth value {val!r}")


# NOTE(review): the three helpers below all shared one scrambled name, so only
# the last was reachable; distinct names are restored from the upstream
# accelerate utilities — confirm against call sites.
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment (``1``/``true``/``yes``/...)."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Return the raw string value of ``key`` from the environment, else ``default``."""
    value = os.environ.get(key, str(default))
    return value
| 34 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, in O(sqrt(n)) time.

    Names restored: all three functions shared one scrambled name, yet the
    bodies and the ``__main__`` guard call ``is_prime``, ``prime_generator``
    and ``solution``; the takewhile lambda also referenced an undefined ``x``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers indefinitely, in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``n`` (Project Euler #10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 358 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# docstyle-ignore
# Constant names restored from their use sites in ``download_prompt`` below
# (the scrambled source assigned all three to one name, leaving
# ``DEFAULT_PROMPTS_REPO`` and ``PROMPT_FILES`` undefined).
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Return the prompt template for ``mode``.

    If ``prompt_or_repo_id`` contains whitespace it is treated as a literal
    prompt and returned unchanged; otherwise it is treated as a dataset repo
    id whose template file is downloaded. ``None`` falls back to the default
    prompts repository.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 202 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case(ModelMixin, ConfigMixin):
    """Holds mean/std statistics used to (un)normalize CLIP image embeddings.

    The base classes were both the same undefined scrambled name; they are
    restored from the ``ModelMixin``/``ConfigMixin`` imports above.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        # Learned-in-place statistics, shape (1, embedding_dim).
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics, re-wrapping them as Parameters, and return self."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    # NOTE(review): the two method names below were lost by the scrambling and
    # are restored from the upstream image normalizer — confirm call sites.
    def scale(self, embeds):
        """Normalize: (embeds - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert ``scale``: embeds * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 53 |
'''simple docstring'''
def __snake_case(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series: n/2 * (2*a1 + (n-1)*d).

    Fix: the three parameters previously shared one obfuscated name (a
    SyntaxError); the names restored here are the ones the body already used.
    """
    # formula for sum of series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def __snake_case() -> None:
    """Demo entry point: print the sum of the series 1 + 2 + ... + 10."""
    # NOTE(review): the series helper above is also (obfuscated-)named
    # `__snake_case` and is shadowed by this definition, and `sum_of_series`
    # does not exist in this file — compute the sum inline with the same
    # formula so the demo actually runs.
    first_term, common_diff, num_of_terms = 1, 1, 10
    print((num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35 | 0 |
def SCREAMING_SNAKE_CASE_(number: float, digit_amount: int) -> float:
    """Return the fractional (decimal) part of `number`.

    Args:
        number: Value whose fractional part is isolated; sign is preserved
            because `int()` truncates toward zero.
        digit_amount: When > 0, round the fractional part to this many
            digits; otherwise return it unrounded.

    Fix: both parameters previously shared one obfuscated name (a
    SyntaxError); restored from the names the body referenced.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo calls for the fractional-part helper above.
    # NOTE(review): `decimal_isolate` is not defined in this file (the helper
    # is obfuscated-named `SCREAMING_SNAKE_CASE_`), so running this module as
    # a script raises NameError — confirm the intended name.
    # Numeric literals use underscore grouping, e.g. 35.3_45 == 35.345.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 62 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests: BetterTransformer round-trip on a tiny random T5.

    NOTE(review): obfuscation damage — both methods share the name `_A`
    (the second shadows the first), and the argument placeholder
    `__lowerCamelCase` is never defined in this file, so these methods raise
    NameError as written. Confirm intended identifiers before relying on them.
    """

    def _A ( self : str ):
        # Convert to BetterTransformer, generate, revert, save, reload, and
        # check the reloaded model produces identical generations.
        UpperCamelCase :Dict = """hf-internal-testing/tiny-random-t5"""
        UpperCamelCase :Optional[int] = AutoTokenizer.from_pretrained(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
        UpperCamelCase :List[Any] = tokenizer("""This is me""" , return_tensors="""pt""" )
        UpperCamelCase :int = model.to_bettertransformer()
        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        UpperCamelCase :List[Any] = model.generate(**__lowerCamelCase )
        UpperCamelCase :List[Any] = model.reverse_bettertransformer()
        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(__lowerCamelCase )
            UpperCamelCase :List[str] = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            UpperCamelCase :List[Any] = model_reloaded.generate(**__lowerCamelCase )
            self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )

    def _A ( self : Optional[int] ):
        # Saving while still in BetterTransformer form must raise; after
        # reversing, saving must succeed.
        UpperCamelCase :Dict = """hf-internal-testing/tiny-random-t5"""
        UpperCamelCase :Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
        UpperCamelCase :List[Any] = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(__lowerCamelCase ):
                model.save_pretrained(__lowerCamelCase )
            UpperCamelCase :int = model.reverse_bettertransformer()
            model.save_pretrained(__lowerCamelCase )
| 62 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE(__snake_case):
    """argparse factory: build the env command object (argument is ignored).

    Fixes: returned the undefined name `EnvironmentCommand` (the command
    class in this file is obfuscated-named `a`), and carried a
    `Union[str, Any]` annotation although this module never imports `typing`.
    """
    return a()
class a(BaseDiffusersCLICommand):
    """`diffusers-cli env` subcommand: collect and print environment info.

    Fix summary (obfuscation damage): the base class was the undefined name
    `a_` (restored to `BaseDiffusersCLICommand`, imported above); every local
    collapsed onto `lowercase` while the info dict reads the real names
    (`hub_version`, `pt_version`, ...); `download_parser` was read but never
    assigned; and all three methods shared one name, so only the last
    survived. Method names `register_subcommand`/`run`/`format_dict` are
    restored from the `self.format_dict(...)` call site and CLI convention —
    confirm against callers.
    """

    @staticmethod
    def register_subcommand(parser):
        # Register the `env` sub-parser and route it to the factory above.
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )

    def run(self):
        """Gather versions of diffusers' optional dependencies and print them."""
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )

        return info

    @staticmethod
    def format_dict(d):
        """Render a dict as a markdown-ish bullet list."""
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 220 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Fix: both constants previously shared one obfuscated name, so
# `NUM_SHARDS * NUM_ITEMS_PER_SHARD` in main() raised NameError.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """Raised by main() when a worker sees the wrong number of examples.

    Fix: the class was obfuscated-named `a` with the undefined base `a_`,
    while main() raises it by the name `FailedTestError`; the base is
    restored to a standard exception so `raise` works.
    """
def _SCREAMING_SNAKE_CASE(shards):
    """Yield NUM_ITEMS_PER_SHARD example dicts `{"i": i, "shard": shard}` per shard.

    Fix: the parameter was obfuscated to a name the body never used (so
    `for shard in shards` raised NameError), and the inner `range()` pointed
    at an undefined name; `NUM_ITEMS_PER_SHARD` is the module constant that
    main() already uses for the same quantity.
    """
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """Distributed smoke test: split a generated dataset across ranks and
    verify each rank's DataLoader yields the expected number of examples.

    Fix summary (obfuscation damage): the function was named
    `_SCREAMING_SNAKE_CASE`, clashing with the shard generator above, while
    the __main__ guard calls `main()`; every local collapsed onto `lowercase`
    while later statements read the real names (`parser`, `args`,
    `streaming`, ...). Names restored from the usage sites.
    """
    rank = int(os.environ['RANK'] )
    world_size = int(os.environ['WORLD_SIZE'] )

    parser = ArgumentParser()
    # NOTE(review): the original `type=` arguments were lost to obfuscation;
    # bool/int restored from how the values are used below — confirm.
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'shards': [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(_SCREAMING_SNAKE_CASE , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )

    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )

    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )


if __name__ == "__main__":
    main()
| 220 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE__(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer: a Keras layer wrapping keras_nlp's BPE.

    Fix summary (obfuscation damage): `__init__` declared four parameters
    that all shared one name (a SyntaxError) and stored everything in
    throwaway locals even though the other methods read `self.vocab`,
    `self.merges`, `self.max_length`, `self.pad_token_id` and
    `self.tf_tokenizer`; and all five methods shared one name, so only the
    last survived. Parameter/attribute names are restored from `get_config`
    and the usage sites; `from_tokenizer` is grounded by the internal
    `cls.from_tokenizer` call, while the Keras-style `get_config` /
    `from_config` / `call` names are conventional — confirm against callers.
    """

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer, *args, **kwargs):
        """Build from an existing (slow) GPT-2 tokenizer's merges and vocab."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        """Load the slow tokenizer from the Hub and convert it to this layer."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook."""
        return cls(**config)

    def get_config(self):
        """Keras serialization hook."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        """Tokenize `x`; pad/truncate when a pad token id is configured."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way split of `data` around `pivot`: (less, equal, greater).

    Fix: the two parameters shared one obfuscated name (a SyntaxError) and
    the three accumulators collapsed onto one local; names restored from the
    `append` calls and the call site in `quick_select`.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at `items[index]` if `items` were sorted.

    Randomized quickselect; returns None for an out-of-range `index`.
    Fixes: duplicate obfuscated parameter names (SyntaxError); the recursive
    calls passed the *original* list/pivot back in (infinite recursion) —
    restored to recurse into the smaller/larger partition.
    """
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    if m > index:
        return quick_select(smaller, index)
    # must be in larger
    return quick_select(larger, index - (m + count))
| 20 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _UpperCAmelCase(unittest.TestCase):
    """Launch accelerate's external-deps metrics script under several setups.

    Fix summary (obfuscation damage): the first method stored everything in
    throwaway locals while the other tests read `self.test_metrics` /
    `self.test_file_path`, and all five methods shared one name (only the
    last survived). Attribute assignments and distinct method names are
    restored; the `setUp`/`test_*` names follow the attributes and
    decorators visible here — confirm against the upstream accelerate suite.
    """

    def setUp(self):
        # Locate accelerate's bundled test_metrics.py and import it.
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metrics_cpu_noop(self):
        # Single process on CPU.
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def test_metrics_cpu_multi(self):
        # Default (multi-process) CPU launch.
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def test_metrics_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metrics_gpu_multi(self):
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 102 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Fix: both constants shared one obfuscated name; the test class below reads
# `SPIECE_UNDERLINE` by name, which was therefore undefined.
SPIECE_UNDERLINE = """▁"""  # sentencepiece word-boundary marker
# NOTE(review): fixture-path constant name per transformers test convention — confirm.
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
    """Tokenizer tests for BigBird (slow and fast) over a tiny sentencepiece fixture.

    NOTE(review): obfuscation damage throughout this class — the four class
    attributes below all share one name (only the last assignment survives;
    they look like tokenizer_class / rust_tokenizer_class /
    test_rust_tokenizer / test_sentencepiece), every method shares the name
    `SCREAMING_SNAKE_CASE` (so only the last definition exists on the class),
    and the argument placeholder `a_` is never defined in this file. Confirm
    intended identifiers against the upstream transformers test suite.
    """
    lowerCamelCase__ =BigBirdTokenizer
    lowerCamelCase__ =BigBirdTokenizerFast
    lowerCamelCase__ =True
    lowerCamelCase__ =True

    def SCREAMING_SNAKE_CASE (self ):
        """Looks like setUp: build a slow tokenizer from the fixture and save it."""
        super().setUp()

        __snake_case : List[Any] = self.tokenizer_class(a_ , keep_accents=a_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE (self ):
        """Round-trip a single special token through id/token conversion."""
        __snake_case : Optional[Any] = '''<s>'''
        __snake_case : Optional[Any] = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )

    def SCREAMING_SNAKE_CASE (self ):
        """Check first/last vocab entries and the vocab length."""
        __snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
        self.assertEqual(len(a_ ) , 10_04 )

    def SCREAMING_SNAKE_CASE (self ):
        """Vocab size of the fixture tokenizer."""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )

    def SCREAMING_SNAKE_CASE (self ):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        __snake_case : str = self.get_tokenizer()
        __snake_case : Dict = self.get_rust_tokenizer()

        __snake_case : Dict = '''I was born in 92000, and this is falsé.'''

        __snake_case : int = tokenizer.tokenize(a_ )
        __snake_case : str = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , a_ )

        __snake_case : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
        __snake_case : Tuple = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
        self.assertListEqual(a_ , a_ )

        __snake_case : Optional[Any] = self.get_rust_tokenizer()
        __snake_case : Optional[int] = tokenizer.encode(a_ )
        __snake_case : Dict = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_ , a_ )

    def SCREAMING_SNAKE_CASE (self ):
        """Exact tokenization/ids for the fixture model, including <unk> fallback."""
        __snake_case : List[Any] = BigBirdTokenizer(a_ , keep_accents=a_ )

        __snake_case : Optional[int] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a_ ) , [2_85, 46, 10, 1_70, 3_82] , )

        __snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        __snake_case : Tuple = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(
            a_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    @cached_property
    def SCREAMING_SNAKE_CASE (self ):
        """Real pretrained tokenizer for the slow integration tests below."""
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """Exact encoding of a short string with the pretrained vocab."""
        __snake_case : List[str] = '''Hello World!'''
        __snake_case : List[Any] = [65, 1_85_36, 22_60, 1_01, 66]

        self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """Exact encoding of a long string with rare/unknown words."""
        __snake_case : Optional[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        # fmt: off
        __snake_case : Optional[int] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """The encoded batch must feed a BigBirdModel forward pass."""
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        __snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
        __snake_case : Tuple = ''' '''.join(a_ )
        __snake_case : Tuple = self.big_tokenizer.encode_plus(a_ , return_tensors='''pt''' , return_token_type_ids=a_ )
        __snake_case : List[Any] = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=a_ )

        __snake_case : Optional[int] = BigBirdConfig(attention_type='''original_full''' )
        __snake_case : str = BigBirdModel(a_ )

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**a_ )
            model(**a_ )

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """Decoding keeps special tokens like [MASK] intact."""
        __snake_case : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
        __snake_case : Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """Full integration check against a pinned revision's exact encoding."""
        # fmt: off
        __snake_case : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 102 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger (obfuscated name; conventionally `logger`).
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
    """Byte-level (ByT5-style) tokenizer: one token per UTF-8 byte plus a
    small block of special tokens and trailing extra-id sentinels.

    NOTE(review): obfuscation damage — the base class `a` is undefined here
    (upstream this derives from PreTrainedTokenizer, imported above);
    `__init__` declares several parameters that all share the name `__a`
    (a SyntaxError); and values assigned to throwaway `_lowerCAmelCase`
    locals are later read as `self.*` attributes (e.g.
    `self.special_tokens_encoder`, `self._utf_vocab_size`). Confirm the
    intended identifiers against the upstream ByT5Tokenizer.
    """

    # Model input names expected by the forward pass.
    lowerCamelCase__ = ['input_ids', 'attention_mask']

    def __init__( self, __a="</s>", __a="<unk>", __a="<pad>", __a=125, __a=None, **__a, ):
        """Build the byte vocabulary; eos/unk/pad defaults plus 125 extra ids."""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            _lowerCAmelCase : List[str] = [f"<extra_id_{i}>" for i in range(__a)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _lowerCAmelCase : str = len(set(filter(lambda __a: bool("extra_id" in str(__a)), __a)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        # Wrap plain-string special tokens as AddedToken (no strip on either side).
        _lowerCAmelCase : List[Any] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else pad_token
        _lowerCAmelCase : int = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else eos_token
        _lowerCAmelCase : str = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else unk_token
        super().__init__(
            eos_token=__a, unk_token=__a, pad_token=__a, extra_ids=__a, additional_special_tokens=__a, **__a, )
        _lowerCAmelCase : int = extra_ids
        _lowerCAmelCase : Union[str, Any] = 2**8  # utf is 8 bits
        # define special tokens dict
        _lowerCAmelCase : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _lowerCAmelCase : List[str] = len(self.special_tokens_encoder)
        _lowerCAmelCase : Tuple = len(__a)
        for i, token in enumerate(__a):
            # Extra ids occupy the top of the id space, in order.
            _lowerCAmelCase : str = self.vocab_size + i - n
        _lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def snake_case__ ( self):
        """Total vocab size: 256 bytes + special tokens + extra ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def snake_case__ ( self, __a, __a = None, __a = False):
        """Mask marking special-token positions (1) in a built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)

        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a)) + [1]
        return ([0] * len(__a)) + [1] + ([0] * len(__a)) + [1]

    def snake_case__ ( self, __a):
        """Append eos unless the sequence already ends with it (warn if so)."""
        if len(__a) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def snake_case__ ( self, __a, __a = None):
        """Token-type ids: ByT5 does not use them, so all zeros."""
        _lowerCAmelCase : Any = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def snake_case__ ( self, __a, __a = None):
        """Join one or two sequences, eos-terminating each."""
        _lowerCAmelCase : Any = self._add_eos_if_not_present(__a)
        if token_ids_a is None:
            return token_ids_a
        else:
            _lowerCAmelCase : Dict = self._add_eos_if_not_present(__a)
            return token_ids_a + token_ids_a

    def snake_case__ ( self, __a):
        """Tokenize: one pseudo-character per UTF-8 byte."""
        _lowerCAmelCase : Optional[Any] = [chr(__a) for i in text.encode("utf-8")]
        return tokens

    def snake_case__ ( self, __a):
        """Token → id: specials first, then added tokens, then byte offset."""
        if token in self.special_tokens_encoder:
            _lowerCAmelCase : Any = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _lowerCAmelCase : List[str] = self.added_tokens_encoder[token]
        elif len(__a) != 1:
            # Multi-character strings cannot be single bytes → unknown.
            _lowerCAmelCase : List[Any] = self.unk_token_id
        else:
            _lowerCAmelCase : Dict = ord(__a) + self._num_special_tokens
        return token_id

    def snake_case__ ( self, __a):
        """Id → token: inverse of the mapping above."""
        if index in self.special_tokens_decoder:
            _lowerCAmelCase : Tuple = self.special_tokens_decoder[index]
        else:
            _lowerCAmelCase : List[Any] = chr(index - self._num_special_tokens)
        return token
    def snake_case__ ( self, __a):
        """Reassemble tokens into a string by concatenating raw bytes."""
        _lowerCAmelCase : Union[str, Any] = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                _lowerCAmelCase : Optional[Any] = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                _lowerCAmelCase : Dict = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                _lowerCAmelCase : str = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                _lowerCAmelCase : List[str] = token.encode("utf-8")
            else:
                _lowerCAmelCase : Optional[int] = bytes([ord(__a)])
            bstring += tok_string
        # Invalid byte sequences are dropped rather than raising.
        _lowerCAmelCase : int = bstring.decode("utf-8", errors="ignore")
        return string

    def snake_case__ ( self, __a, __a = None):
        """No vocabulary files to save for a byte-level tokenizer."""
        return ()
| 364 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Fix: the menu class below reads the flag as `in_colab`, but the value was
# stored under an obfuscated name and therefore undefined. Best-effort
# detection of Google Colab; stays False when probing is impossible.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class UpperCAmelCase_ :
    """Interactive terminal bullet menu: arrow/number keys move a highlighted
    cursor over `choices`; enter selects, Ctrl-C interrupts.

    NOTE(review): obfuscation damage — `__init__` declares two parameters
    that share the name `__a` (a SyntaxError) and stores values in throwaway
    locals although every method reads `self.position` / `self.choices` /
    `self.prompt` / `self.arrow_char`; all nine methods also share the name
    `snake_case__` (the `@input.mark` decorators may still register the
    function objects with the key map — confirm). The `[]` default is also a
    shared mutable default argument.
    """

    def __init__( self, __a = None, __a = []):
        """Store the prompt/choices and pick a platform arrow glyph."""
        _lowerCAmelCase : Optional[int] = 0
        _lowerCAmelCase : Optional[int] = choices
        _lowerCAmelCase : Tuple = prompt
        if sys.platform == "win32":
            # Windows consoles may lack the arrow glyph.
            _lowerCAmelCase : Optional[Any] = "*"
        else:
            _lowerCAmelCase : Dict = "➔ "

    def snake_case__ ( self, __a, __a = ""):
        """Write one choice label, green on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, __a)
        else:
            forceWrite(self.choices[index], __a)

    def snake_case__ ( self, __a):
        """Render a row; only the current position gets the arrow prefix."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(__a)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def snake_case__ ( self, __a, __a = 1):
        """Move the highlight up/down by `num_spaces`, clamped to the list."""
        _lowerCAmelCase : Union[str, Any] = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        # Redraw the row we left, then the row we arrived at.
        clear_line()
        self.print_choice(__a)
        move_cursor(__a, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def snake_case__ ( self):
        """Arrow-up handler."""
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def snake_case__ ( self):
        """Arrow-down handler."""
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def snake_case__ ( self):
        """Enter: park the cursor below the menu and return the selection."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def snake_case__ ( self):
        """Ctrl-C: restore the cursor position, then propagate."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(__a)] for number in range(10)])
    def snake_case__ ( self):
        """Digit keys jump straight to that row (out-of-range digits ignored).

        NOTE(review): the decorator's comprehension indexes KEYMAP with the
        undefined placeholder `__a` instead of `number` — confirm.
        """
        _lowerCAmelCase : str = int(chr(self.current_selection))
        _lowerCAmelCase : List[str] = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, __a)
            else:
                return
        else:
            return

    def snake_case__ ( self, __a = 0):
        """Draw the menu and block until a choice is made; returns its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        _lowerCAmelCase : List[Any] = default_choice
        for i in range(len(self.choices)):
            self.print_choice(__a)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab has no raw key handling; fall back to typed input.
                    try:
                        _lowerCAmelCase : str = int(builtins.input())
                    except ValueError:
                        _lowerCAmelCase : List[Any] = default_choice
                else:
                    _lowerCAmelCase : List[str] = self.handle_input()
                if choice is not None:
                    # Erase the menu before returning.
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(__a, "\n")
                    return choice
| 300 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A( a__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE__ = ["""prompt"""]
SCREAMING_SNAKE_CASE__ = ["""prompt""", """negative_prompt"""]
SCREAMING_SNAKE_CASE__ = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE__ = False
@property
def UpperCAmelCase_ (self ):
return 32
@property
def UpperCAmelCase_ (self ):
return 32
@property
def UpperCAmelCase_ (self ):
return self.time_input_dim
@property
def UpperCAmelCase_ (self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ (self ):
return 1_00
@property
def UpperCAmelCase_ (self ):
UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__lowerCAmelCase )
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
UpperCamelCase__ = PriorTransformer(**__lowerCAmelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
UpperCamelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
UpperCamelCase__ = CLIPVisionModelWithProjection(__lowerCAmelCase )
return model
@property
def UpperCAmelCase_ (self ):
UpperCamelCase__ = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowerCAmelCase , do_normalize=__lowerCAmelCase , do_resize=__lowerCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.dummy_prior
UpperCamelCase__ = self.dummy_image_encoder
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = self.dummy_tokenizer
UpperCamelCase__ = self.dummy_image_processor
UpperCamelCase__ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=__lowerCAmelCase , clip_sample_range=10.0 , )
UpperCamelCase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def get_dummy_inputs(self, device, seed=0):
    """Deterministic pipeline call kwargs; `seed` controls the RNG generator."""
    if str(device).startswith("mps"):
        # torch.Generator is not supported on MPS; fall back to the global RNG.
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        "prompt": "horse",
        "generator": generator,
        "guidance_scale": 4.0,
        "num_inference_steps": 2,
        "output_type": "np",
    }
    return inputs
def test_kandinsky_prior(self):
    """Smoke-test the prior pipeline on CPU and check a slice of the embedding."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    output = pipe(**self.get_dummy_inputs(device))
    image = output.image_embeds
    # Calling with return_dict=False must yield the same embeddings as a tuple.
    image_from_tuple = pipe(
        **self.get_dummy_inputs(device),
        return_dict=False,
    )[0]
    image_slice = image[0, -10:]
    image_from_tuple_slice = image_from_tuple[0, -10:]
    assert image.shape == (1, 32)
    expected_slice = np.array(
        [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
    )
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
    """Batched and single-sample inference should produce matching embeddings."""
    # Exact max-difference comparison is only stable on CPU.
    test_max_difference = torch_device == "cpu"
    relax_max_difference = True
    test_mean_pixel_difference = False
    self._test_inference_batch_single_identical(
        test_max_difference=test_max_difference,
        relax_max_difference=relax_max_difference,
        test_mean_pixel_difference=test_mean_pixel_difference,
    )
@skip_mps
def test_attention_slicing_forward_pass(self):
    """Enabling attention slicing must not change the pipeline output."""
    # Exact max-difference comparison is only stable on CPU.
    test_max_difference = torch_device == "cpu"
    test_mean_pixel_difference = False
    self._test_attention_slicing_forward_pass(
        test_max_difference=test_max_difference,
        test_mean_pixel_difference=test_mean_pixel_difference,
    )
| 244 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase(unittest.TestCase):
    """Integration test for the TF MT5 model (downloads google/mt5-small)."""

    @slow
    def test_small_integration_test(self):
        """Score a tiny example and compare against the mesh-tensorflow reference value."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        # Negate the mean loss to get a score comparable to the reference impl.
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 198 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Canonical DistilBERT checkpoints mapped to their hosted config.json URLs.
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class DistilBertConfig(PretrainedConfig):
    """Configuration for a DistilBERT model.

    Holds the hyper-parameters that define the architecture; any extra kwargs
    are forwarded to `PretrainedConfig`.
    """

    model_type = "distilbert"
    # Translate the generic HF attribute names to DistilBERT's own field names.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        # Dropouts for the QA and sequence-classification task heads.
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for DistilBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 115 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below emit warnings through it.
logger = logging.get_logger(__name__)

# Canonical AltCLIP checkpoints mapped to their hosted config.json URLs.
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the text tower of AltCLIP.

    Defaults follow the XLM-RoBERTa-large backbone used by BAAI/AltCLIP.
    """

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection head on top of the text encoder.
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of AltCLIP (a CLIP-style ViT)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the vision sub-config of a full AltCLIP checkpoint."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding both AltCLIP sub-configs plus projection settings."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 115 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and two optional children."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None  # left child (None for a leaf)
        self.right: Node | None = None  # right child (None for a leaf)
class BinaryTreeNodeSum:
    """Sum of all node values in a binary tree, computed by depth-first search.

    Iterating the object yields the total once.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of values in the subtree rooted at `node` (0 if empty)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
# When executed as a script, run this module's doctests.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 246 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
    """Tests for the Wav2Vec2 processor that couples a feature extractor, a CTC
    tokenizer and a pyctcdecode beam-search decoder.

    NOTE(review): automated renaming has collapsed every method name to
    `__snake_case` (later defs shadow earlier ones) and replaced many locals and
    `self.` attribute targets with placeholders (`_UpperCAmelCase`, `_A`), so
    attributes such as `self.tmpdirname`, `self.vocab_file` and helpers like
    `self.get_tokenizer()` are referenced but never defined. The original method
    names (setUp, get_tokenizer, get_feature_extractor, get_decoder, tearDown,
    test_*) must be restored before this class can run.
    """

    def __snake_case ( self ) -> str:
        """setUp: write a toy vocab and feature-extractor config into a tmp dir."""
        # NOTE(review): every `_UpperCAmelCase : T = ...` below was originally a
        # distinct local or `self.` attribute assignment; `_A` was a real name.
        _UpperCAmelCase : Tuple = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
        _UpperCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
        _UpperCAmelCase : List[Any] = {
            """unk_token""": """<unk>""",
            """bos_token""": """<s>""",
            """eos_token""": """</s>""",
        }
        _UpperCAmelCase : Dict = {
            """feature_size""": 1,
            """padding_value""": 0.0,
            """sampling_rate""": 1_60_00,
            """return_attention_mask""": False,
            """do_normalize""": True,
        }
        _UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
        _UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        _UpperCAmelCase : int = os.path.join(self.tmpdirname , _A )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_A ) + """\n""" )
        with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_A ) + """\n""" )
        # load decoder from hub
        _UpperCAmelCase : List[Any] = """hf-internal-testing/ngram-beam-search-decoder"""

    def __snake_case ( self , **_A ) -> Dict:
        """Build the CTC tokenizer from the tmp dir (originally get_tokenizer)."""
        _UpperCAmelCase : str = self.add_kwargs_tokens_map.copy()
        kwargs.update(_A )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )

    def __snake_case ( self , **_A ) -> Tuple:
        """Build the feature extractor from the tmp dir (originally get_feature_extractor)."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )

    def __snake_case ( self , **_A ) -> Union[str, Any]:
        """Download the beam-search decoder fixture (originally get_decoder)."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )

    def __snake_case ( self ) -> Tuple:
        """tearDown: remove the tmp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self ) -> List[str]:
        """Round-trip save_pretrained/from_pretrained and compare all three components."""
        _UpperCAmelCase : Optional[Any] = self.get_tokenizer()
        _UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
        _UpperCAmelCase : List[str] = self.get_decoder()
        _UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        processor.save_pretrained(self.tmpdirname )
        _UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , _A )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , _A )

    def __snake_case ( self ) -> int:
        """Loading with overridden LM hyper-parameters must apply them to the decoder."""
        _UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        _UpperCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )

    def __snake_case ( self ) -> List[str]:
        """Tokens unknown to the decoder's alphabet must raise at construction."""
        _UpperCAmelCase : Any = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["""xx"""] )
        with self.assertRaisesRegex(_A , """include""" ):
            WavaVecaProcessorWithLM(
                tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )

    def __snake_case ( self ) -> Optional[Any]:
        """Processor audio output must match the bare feature extractor's output."""
        _UpperCAmelCase : str = self.get_feature_extractor()
        _UpperCAmelCase : Optional[Any] = self.get_tokenizer()
        _UpperCAmelCase : List[Any] = self.get_decoder()
        _UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
        _UpperCAmelCase : str = feature_extractor(_A , return_tensors="""np""" )
        _UpperCAmelCase : int = processor(_A , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def __snake_case ( self ) -> int:
        """Processor text output must match the bare tokenizer's output."""
        _UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
        _UpperCAmelCase : Any = self.get_tokenizer()
        _UpperCAmelCase : Dict = self.get_decoder()
        _UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : Optional[Any] = """This is a test string"""
        _UpperCAmelCase : Optional[Any] = processor(text=_A )
        _UpperCAmelCase : Dict = tokenizer(_A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    # NOTE(review): duplicate parameter name `_A` below is a SyntaxError — the
    # original signature was (self, shape=(2, 10, 16), seed=77).
    def __snake_case ( self , _A=(2, 10, 16) , _A=77 ) -> Union[str, Any]:
        """Deterministic random logits used as fake model output."""
        np.random.seed(_A )
        return np.random.rand(*_A )

    def __snake_case ( self ) -> Tuple:
        """Single-sample decode must agree with pyctcdecode's decode_beams."""
        _UpperCAmelCase : Tuple = self.get_feature_extractor()
        _UpperCAmelCase : Tuple = self.get_tokenizer()
        _UpperCAmelCase : Optional[Any] = self.get_decoder()
        _UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        _UpperCAmelCase : Optional[int] = processor.decode(_A )
        _UpperCAmelCase : str = decoder.decode_beams(_A )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )

    @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def __snake_case ( self , _A ) -> Union[str, Any]:
        """Batch decode (optionally with a multiprocessing pool) must agree with pyctcdecode."""
        _UpperCAmelCase : Tuple = self.get_feature_extractor()
        _UpperCAmelCase : str = self.get_tokenizer()
        _UpperCAmelCase : Optional[Any] = self.get_decoder()
        _UpperCAmelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            _UpperCAmelCase : int = processor.batch_decode(_A )
        else:
            with get_context(_A ).Pool() as pool:
                _UpperCAmelCase : Dict = processor.batch_decode(_A , _A )
        _UpperCAmelCase : Tuple = list(_A )
        with get_context("""fork""" ).Pool() as p:
            _UpperCAmelCase : Tuple = decoder.decode_beams_batch(_A , _A )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(_A , decoded_processor.text )
        self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
        self.assertListEqual(_A , decoded_processor.logit_score )
        self.assertListEqual(_A , decoded_processor.lm_score )

    def __snake_case ( self ) -> Optional[Any]:
        """Beam-search kwargs (beam_width, pruning) must be forwarded to the decoder."""
        _UpperCAmelCase : Optional[int] = self.get_feature_extractor()
        _UpperCAmelCase : int = self.get_tokenizer()
        _UpperCAmelCase : Tuple = self.get_decoder()
        _UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : Optional[int] = self._get_dummy_logits()
        _UpperCAmelCase : List[str] = 15
        _UpperCAmelCase : Dict = -20.0
        _UpperCAmelCase : List[str] = -4.0
        _UpperCAmelCase : Any = processor.batch_decode(
            _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
        _UpperCAmelCase : Any = decoded_processor_out.text
        _UpperCAmelCase : Any = list(_A )
        with get_context("""fork""" ).Pool() as pool:
            _UpperCAmelCase : str = decoder.decode_beams_batch(
                _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
        _UpperCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
        _UpperCAmelCase : List[str] = [d[0][2] for d in decoded_decoder_out]
        _UpperCAmelCase : Tuple = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(_A , _A )
        self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _A )
        self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1e-3 ) )
        self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1e-3 ) )

    def __snake_case ( self ) -> Union[str, Any]:
        """LM kwargs (alpha, beta, unk offset, boundary) must be forwarded and applied."""
        _UpperCAmelCase : int = self.get_feature_extractor()
        _UpperCAmelCase : List[Any] = self.get_tokenizer()
        _UpperCAmelCase : Union[str, Any] = self.get_decoder()
        _UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        _UpperCAmelCase : Optional[int] = self._get_dummy_logits()
        _UpperCAmelCase : Any = 2.0
        _UpperCAmelCase : Union[str, Any] = 5.0
        _UpperCAmelCase : List[Any] = -20.0
        _UpperCAmelCase : str = True
        _UpperCAmelCase : Union[str, Any] = processor.batch_decode(
            _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
        _UpperCAmelCase : Tuple = decoded_processor_out.text
        _UpperCAmelCase : Tuple = list(_A )
        decoder.reset_params(
            alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
        with get_context("""fork""" ).Pool() as pool:
            _UpperCAmelCase : Optional[Any] = decoder.decode_beams_batch(
                _A , _A , )
        _UpperCAmelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(_A , _A )
        self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _A )
        _UpperCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , _A )

    def __snake_case ( self ) -> str:
        """from_pretrained must download only the decoder-relevant files."""
        _UpperCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
        _UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        _UpperCAmelCase : Optional[Any] = os.listdir(_A )
        _UpperCAmelCase : Optional[Any] = ["""alphabet.json""", """language_model"""]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_A , _A )

    def __snake_case ( self ) -> Tuple:
        """Loading from a local snapshot must yield the same decoder files as the hub."""
        _UpperCAmelCase : Optional[int] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_A )
        _UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
        _UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        _UpperCAmelCase : Optional[Any] = os.listdir(_A )
        _UpperCAmelCase : Union[str, Any] = os.listdir(_A )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_A , _A )

    def __snake_case ( self ) -> Any:
        """AutoProcessor must resolve to the same behavior as the explicit class."""
        _UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
        _UpperCAmelCase : str = processor_wavaveca(_A , return_tensors="""np""" )
        _UpperCAmelCase : Any = processor_auto(_A , return_tensors="""np""" )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        _UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
        _UpperCAmelCase : Dict = processor_wavaveca.batch_decode(_A )
        _UpperCAmelCase : Optional[Any] = processor_auto.batch_decode(_A )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    def __snake_case ( self ) -> Optional[int]:
        """Processor input names must mirror the feature extractor's."""
        _UpperCAmelCase : List[Any] = self.get_feature_extractor()
        _UpperCAmelCase : Optional[int] = self.get_tokenizer()
        _UpperCAmelCase : Union[str, Any] = self.get_decoder()
        _UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )

    # NOTE(review): duplicate parameter name `_A` below is a SyntaxError — the
    # original signature was (offsets, key).
    @staticmethod
    def __snake_case ( _A , _A ) -> List[Any]:
        """Extract one field from each offset dict in a list."""
        _UpperCAmelCase : Any = [d[key] for d in offsets]
        return retrieved_list

    def __snake_case ( self ) -> str:
        """decode with output_word_offsets must return consistent per-word offsets."""
        _UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : List[Any] = self._get_dummy_logits()[0]
        _UpperCAmelCase : Tuple = processor.decode(_A , output_word_offsets=_A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(_A , _A ) )
        self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )

    def __snake_case ( self ) -> Tuple:
        """batch_decode with output_word_offsets must return consistent per-word offsets."""
        _UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        _UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
        _UpperCAmelCase : List[Any] = processor.batch_decode(_A , output_word_offsets=_A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(_A , _A ) )
        self.assertListEqual(
            [""" """.join(self.get_from_offsets(_A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def __snake_case ( self ) -> str:
        """End-to-end: run a real wav2vec2+LM model and verify word-level timestamps."""
        import torch

        _UpperCAmelCase : List[str] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_A )
        _UpperCAmelCase : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
        _UpperCAmelCase : List[Any] = iter(_A )
        _UpperCAmelCase : Optional[Any] = next(_A )
        _UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        _UpperCAmelCase : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        _UpperCAmelCase : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
        with torch.no_grad():
            _UpperCAmelCase : Optional[int] = model(_A ).logits.cpu().numpy()
        _UpperCAmelCase : Union[str, Any] = processor.decode(logits[0] , output_word_offsets=_A )
        # Convert frame offsets to seconds using the model's downsampling ratio.
        _UpperCAmelCase : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        _UpperCAmelCase : Optional[int] = [
            {
                """start_time""": d["""start_offset"""] * time_offset,
                """end_time""": d["""end_offset"""] * time_offset,
                """word""": d["""word"""],
            }
            for d in output["""word_offsets"""]
        ]
        _UpperCAmelCase : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
        # output words
        self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , _A )
        self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , output.text )
        # output times
        _UpperCAmelCase : List[str] = torch.tensor(self.get_from_offsets(_A , """start_time""" ) )
        _UpperCAmelCase : Any = torch.tensor(self.get_from_offsets(_A , """end_time""" ) )
        # fmt: off
        _UpperCAmelCase : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        _UpperCAmelCase : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
        self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
| 246 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCamelCase_(AbstractDatasetReader):
    """Dataset reader that builds a dataset from a ``pyspark.sql.DataFrame``.

    Wraps the packaged ``Spark`` builder: in streaming mode the DataFrame is
    exposed as a streaming dataset; otherwise it is materialized to
    ``file_format`` files under ``cache_dir`` and loaded from there.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """
        Args:
            df: Source Spark DataFrame.
            split: Optional split name to assign to the resulting dataset.
            features: Optional explicit feature schema.
            streaming: If True, ``read`` returns a streaming dataset.
            cache_dir: Where prepared files are written/read.
            keep_in_memory: Whether to keep the dataset in memory.
            working_dir: Scratch directory handed to the Spark builder.
            load_from_cache_file: If False, force re-preparation of the cache.
            file_format: On-disk format used when materializing ("arrow", ...).
        """
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or materialized)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-preparation when the cache must not be reused.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; fall back to dummy objects when the optional
# backends are missing so the aliases below always exist.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; the tokenizer aliases are eagerly
    # bound above and exposed through extra_objects.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
class Node:
    """A node of a doubly linked list: a payload plus links to both neighbours."""

    def __init__(self, data, previous=None, next_node=None):
        self.data = data          # stored payload
        self.previous = previous  # predecessor node, or None at the head
        self.next = next_node     # successor node, or None at the tail

    def __str__(self) -> str:
        """Render just the payload (used by LinkedList.__str__)."""
        return f"{self.data}"

    def get_data(self):
        """Return the stored payload."""
        return self.data

    def get_next(self):
        """Return the successor node (None at the tail)."""
        return self.next

    def get_previous(self):
        """Return the predecessor node (None at the head)."""
        return self.previous
class LinkedListIterator:
    """Iterator over a chain of nodes, yielding the stored values head-to-tail."""

    def __init__(self, head):
        # Node the next call to __next__ will consume; None means exhausted.
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        """Return the current node's value and advance, or stop at the end."""
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """A doubly linked list with explicit head and tail pointers.

    Values are wrapped in ``Node`` objects; iterating the list yields the
    stored values (via ``LinkedListIterator``), not the nodes.
    """

    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        """Space-separated rendering of all stored values, head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        """Linear scan for a node holding `value`."""
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Value at the head, or None when the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Value at the tail, or None when the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make `node` the new head (also the tail when the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Append `node` as the new tail."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Wrap `value` in a Node and append it to the list."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Splice `node_to_insert` directly before `node` (a member of this list)."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            # `node` was the head, so the new node becomes the head.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Splice `node_to_insert` directly after `node` (a member of this list)."""
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            # `node` was the tail, so the new node becomes the tail.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        """Insert `value` at 1-based `position`; append when position is past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node holding `item`; raise when absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value) -> None:
        """Unlink the first node holding `value` (get_node raises when absent)."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Detach `node` from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self) -> bool:
        return self.head is None
# NOTE(review): `a` appears to be a placeholder whose original body/doctests
# were lost — it currently does nothing; confirm against the upstream source.
def a ( ) -> None:
    """simple docstring"""
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 205 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
# Module-level logger; bound to a generic name that nothing in the visible
# code references — presumably `logger` upstream (TODO confirm).
lowercase_ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder variant that can run a single transformer layer at a time,
    enabling the PABEE (patience-based early exit) inference loop."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only layer `current_layer` over `hidden_states` and return the updated hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BertModel with Patience-based Early Exit (PABEE).

    Training: every layer's pooled output is classified so all per-layer
    classifiers get a training signal. Inference: with ``patience > 0`` the
    model exits early once `patience` consecutive layers agree on the
    prediction; with ``patience == 0`` all layers are used.
    """

    def __init__(self, config):
        super().__init__(config)

        # Replace the stock encoder with one that can run one layer at a time.
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        # Early-exit state; patience == 0 disables early exit.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the tolerance used to decide layer agreement in regression mode."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit."""
        self.patience = patience

    def reset_stats(self):
        """Clear the inference counters used by `log_stats`."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """Print the average number of layers used per inference instance."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Return a list of logits: one tensor per layer during training, a
        single-element list (possibly from an early exit) during inference."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Collect the classifier output of every layer.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    # Agreement = predictions within the regression threshold.
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    # Agreement = identical argmax class predictions.
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """Sequence classification/regression head on top of BertModelWithPabee,
    with one linear classifier per transformer layer for early-exit inference."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classifier per layer so every exit point can produce predictions.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss?, last_logits)``; when `labels` is given, the loss is a
        depth-weighted average over all per-layer classifier losses."""
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                # Deeper layers get proportionally larger weight (ix + 1).
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
def UpperCamelCase(number: int) -> bool:
    """Return True if `number` has at most one set bit.

    Uses the ``n & (n - 1)`` trick: clearing the lowest set bit of a power of
    two yields 0. Note this also returns True for 0 (preserved behavior).

    Raises:
        ValueError: if `number` is negative.
    """
    if number < 0:
        raise ValueError("""number must not be negative""")
    # `&` binds tighter than `==`, so this is (number & (number - 1)) == 0.
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations


def UpperCamelCase(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R).

    Exactly one of the three arguments must be 0 (the unknown); the missing
    quantity is computed from the other two and returned as {name: value}.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("""Exactly one argument must be 0""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: arranged probability.

    Find the smallest number of blue discs `b` in a box of `n > min_total`
    discs such that P(two random blue draws) = b(b-1) / (n(n-1)) = 1/2.
    The (b, n) solutions follow a Pell-like recurrence on the odd numbers
    2b - 1 (`denominator`) and 2n - 1 (`numerator`), so only ~log(min_total)
    iterations are needed.

    Returns:
        The blue-disc count for the first arrangement with more than
        `min_total` discs in total.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # Advance to the next exact solution until the total exceeds min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    # denominator == 2b - 1, so recover b.
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"""{solution() = }""")
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small DistilBert configs and random inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny DistilBert."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests applied to every FlaxDistilBert head."""

    # NOTE: FlaxDistilBertForQuestionAnswering appears twice; preserved as-is
    # from the original tuple.
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The mixin reads self.model_tester to build configs/inputs.
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_models_from_pretrained(self):
        """Smoke-test loading each head from the hub and running a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Numerical regression test against known hidden-state values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from a known-good run of the PyTorch model.
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint to the HF LukeForMaskedLM format.

    Loads config/weights/entity vocab from the original files, extends the
    vocabularies with the downstream-task special tokens, remaps the state
    dict, sanity-checks the converted model's outputs, and saves everything
    to `pytorch_dump_folder_path`.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Mark the saved tokenizer as an MLukeTokenizer and write the entity vocab.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + matrix_name + "_w2e"] = state_dict[prefix + matrix_name]
            state_dict[prefix + matrix_name + "_e2w"] = state_dict[prefix + matrix_name]
            state_dict[prefix + matrix_name + "_e2e"] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # These weights are tied / re-derived by the HF model, so drop them and
    # prefix the remaining backbone weights with "luke.".
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Parse the original JSON-lines entity vocab into ``{name: id}``.

    Special tokens keep their bare name; every other entity is keyed as
    ``"{language}:{entity_name}"``. Entries sharing an id (aliases in
    different languages) all map to the same id.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-independent; one entry suffices.
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 38 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for the RoBERTa-PreLayerNorm model (mirrors ``RobertaConfig``)."""

    # Identifier used by the PretrainedConfig machinery for (de)serialization.
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Build the config; arguments mirror ``RobertaConfig``.

        The original obfuscated signature repeated one parameter name
        (a SyntaxError) and assigned every value to a throwaway local
        instead of instance attributes.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for RoBERTa-PreLayerNorm."""

    @property
    def lowerCamelCase_ ( self ):
        """Return the ONNX input spec: input name -> {axis index: symbolic axis name}."""
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
"""simple docstring"""


def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Convert an integer to its Python-style binary string (e.g. ``0b101``).

    Raises:
        TypeError: if the argument is a float or a string.
    """
    # Original code compared the argument against itself in isinstance()
    # and then read undefined names `num`/`negative`/`binary`.
    if isinstance(_UpperCAmelCase, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(_UpperCAmelCase, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if _UpperCAmelCase == 0:
        return "0b0"
    negative = False
    num = _UpperCAmelCase
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Prepend the lowest remaining bit, then shift it out.
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class _lowerCAmelCase ( BackboneConfigMixin, PretrainedConfig ):
    """Configuration for ConvNeXt V2 (backbone-capable)."""

    # Identifier used by the PretrainedConfig machinery.
    model_type = '''convnextv2'''

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        """Build the config.

        The original obfuscated signature repeated one parameter name
        (a SyntaxError), listed the same base class twice (a TypeError),
        and bound every value to a throwaway local instead of ``self``.
        """
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults follow the convnextv2-tiny architecture.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # NOTE(review): attribute names assumed to be what BackboneConfigMixin
        # expects — confirm against the mixin's accessors.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
'''simple docstring'''


def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively without building large factorials."""
    result = 1  # holds the running product
    # Since C(n, k) = C(n, n-k), use the smaller loop bound.
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the Catalan number C_n = C(2n, n) / (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n!; raises ValueError for negative input."""
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of distinct (labeled) binary trees on ``node_count`` nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
    if node_count <= 0:
        raise ValueError('We need some nodes to work with.')
    print(
        f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
        f'binary trees and {catalan_number(node_count)} binary search trees.'
    )
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, appending full solutions to ``boards``.

    ``possible_board[r]`` is the column of the queen in row ``r``.
    """
    row = len(possible_board)
    # If row equals the board size there is a queen in every row: record the board.
    if row == n:
        # Convert [1, 3, 0, 2] into ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return
    # Try every column in the current row.
    for col in range(n):
        # A column already used means a vertical collision.  The two diagonal
        # families are characterized by:
        #   45º:  row - col = const      135º:  row + col = const
        # so membership in either collision set means the square is attacked.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Safe square: recurse with the updated board and collision sets.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve N-queens for board size ``n`` and print every solution."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')
    print(len(boards), 'solutions were found.')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazily-loaded import structure: submodule name -> public symbols.
# (The original obfuscated code assigned every piece to one throwaway local and
# then passed an undefined `_import_structure` to _LazyModule — NameError on import.)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Smallest non-negative x with x ≡ r_a (mod n_a) and x ≡ r_b (mod n_b).

    Requires n_a and n_b to be coprime.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    # Normalize into [0, m).
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b with a*b ≡ 1 (mod n).

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same result as chinese_remainder_theorem, computed via modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
"""simple docstring"""
def _lowercase ( __snake_case ) -> int:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 58 | 0 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the gamma function Γ(num) by numerical integration.

    Γ(z) = ∫_0^∞ x^(z-1) e^(-x) dx

    Raises:
        ValueError: for non-positive input.
    """
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """Integrand x^(z-1) * e^(-x) of the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a__ ( unittest.TestCase ):
    """Round-trip and equivalence tests for ClapProcessor (tokenizer + feature extractor).

    The original obfuscated class named every method ``__magic_name__`` (so only the
    last survived and unittest discovered no tests) and bound the setUp fixtures to
    locals while other methods read ``self.checkpoint`` / ``self.tmpdirname``.
    """

    def setUp(self):
        # Checkpoint used to materialize the tokenizer/feature-extractor fixtures.
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained upgrades to the fast tokenizer by default.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        # The first two processor input names belong to the tokenizer.
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=5 ):
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('<mask>' ) == 1
__lowerCamelCase : Optional[Any] = torch.tensor(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ).unsqueeze(0 ) # Batch size 1
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ )[0] # The last hidden-state is the first element of the output tuple
__lowerCamelCase : Union[str, Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__lowerCamelCase : Union[str, Any] = logits[0, masked_index, :]
__lowerCamelCase : Optional[Any] = logits.softmax(dim=0 )
__lowerCamelCase , __lowerCamelCase : Any = prob.topk(k=SCREAMING_SNAKE_CASE__ , dim=0 )
__lowerCamelCase : Dict = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] )
__lowerCamelCase : Union[str, Any] = tokenizer.mask_token
__lowerCamelCase : List[Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
__lowerCamelCase : str = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(SCREAMING_SNAKE_CASE__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase_ = CamembertTokenizer.from_pretrained('camembert-base')
lowercase_ = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
lowercase_ = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 194 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
    """Builds tiny RegNet configs and inputs for the Flax model tests below.

    The original obfuscated class was named ``A_`` while the test class
    instantiates ``FlaxRegNetModelTester``; methods repeated one parameter
    name (a SyntaxError) and bound values to locals instead of ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        # None-defaults avoid shared mutable default arguments.
        self.hidden_sizes = [10, 20, 30, 40] if hidden_sizes is None else hidden_sizes
        self.depths = [1, 1, 2, 1] if depths is None else depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # NOTE(review): the obfuscated source only showed len(<arg>); assuming the
        # stage count tracks hidden_sizes — confirm against the upstream tester.
        self.num_stages = len(self.hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        # NOTE(review): the obfuscated line only showed `... = self.num_labels`;
        # assuming it configured the head size — confirm against upstream.
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): every method below is named `_snake_case` (obfuscation residue),
    # so each def shadows the previous one and unittest discovers no `test_*`
    # methods.  Likewise all locals are bound to `__lowerCamelCase`, while several
    # lines read names (`model`, `signature`, `arg_names`, `config`, `outputs`,
    # `inputs_dict`, `a`) that are never defined — TODO restore the real names.

    # Model classes exercised when flax is installed (empty tuple otherwise).
    __snake_case = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    __snake_case = False
    __snake_case = False
    __snake_case = False

    def _snake_case ( self: Tuple ):
        # Build the shared model tester and config tester fixtures.
        # NOTE(review): results are bound to throwaway locals; presumably meant to
        # set self.model_tester / self.config_tester, which later methods read.
        __lowerCamelCase : Dict = FlaxRegNetModelTester(self )
        __lowerCamelCase : List[str] = ConfigTester(self , config_class=a , has_text_modality=a )

    def _snake_case ( self: Union[str, Any] ):
        # Exercise the standard PretrainedConfig round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _snake_case ( self: List[Any] ):
        # Intentional no-op hook called by the config test above.
        return

    def _snake_case ( self: Dict ):
        # Forward-pass shape check via the model tester.
        __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def _snake_case ( self: Optional[int] ):
        # Classification-head shape check via the model tester.
        __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def _snake_case ( self: Tuple ):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def _snake_case ( self: str ):
        pass

    def _snake_case ( self: List[Any] ):
        # Verify the first positional argument of __call__ is `pixel_values`.
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Optional[Any] = model_class(a )
            __lowerCamelCase : Dict = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
            __lowerCamelCase : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def _snake_case ( self: List[str] ):
        # Check the number of reported hidden states matches the stage count (+1
        # for the stem), both when requested via kwargs and via the config.
        # NOTE(review): duplicate parameter names `a` below are a SyntaxError in
        # the obfuscated source — kept verbatim.
        def check_hidden_states_output(a: List[Any] , a: List[Any] , a: Union[str, Any] ):
            __lowerCamelCase : str = model_class(a )
            __lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) )
            __lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __lowerCamelCase : List[str] = self.model_tester.num_stages
            self.assertEqual(len(a ) , expected_num_stages + 1 )

        __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = True
            check_hidden_states_output(a , a , a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase : List[Any] = True
            check_hidden_states_output(a , a , a )

    def _snake_case ( self: Dict ):
        # JIT-compiled and eager outputs must agree in count and shapes.
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __lowerCamelCase : str = self._prepare_for_class(a , a )
                __lowerCamelCase : List[str] = model_class(a )

                @jax.jit
                def model_jitted(a: Optional[int] , **a: str ):
                    return model(pixel_values=a , **a )

                with self.subTest('JIT Enabled' ):
                    __lowerCamelCase : List[str] = model_jitted(**a ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __lowerCamelCase : Optional[int] = model_jitted(**a ).to_tuple()
                self.assertEqual(len(a ) , len(a ) )
                for jitted_output, output in zip(a , a ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """Load the standard COCO fixture image used by the integration test below.

    The original obfuscated def was named ``UpperCamelCase__`` and bound the
    image to a throwaway local while returning an undefined name; the
    integration test already calls ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class A_ ( unittest.TestCase ):
    """Slow integration test: run pretrained Flax RegNet on a fixture image and check logits."""

    @cached_property
    def default_image_processor(self):
        # Only materialize the processor when vision dependencies are installed.
        # (Name grounded by `self.default_image_processor` in the test below.)
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for ``num_rows`` rows, centred with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list:
    """Create Pascal's triangle as a list of rows, one cell at a time."""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list, current_row_idx: int) -> list:
    """Build row ``current_row_idx`` from the previous row of ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list, current_row: list, current_row_idx: int, current_col_idx: int, ) -> None:
    """Set one interior cell as the sum of the two cells above it (in place)."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """Same result as generate_pascal_triangle, built from each row's symmetric half."""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (excluding the middle element for odd rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both generators over a range of sizes using timeit."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazily-loaded import structure: submodule name -> public symbols.
# (The original obfuscated code bound everything to one throwaway local and
# then passed an undefined `_import_structure` to _LazyModule — NameError.)
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase :Tuple = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    """Copy the original HiFi-GAN generator weights into ``hf_model``.

    Args:
        checkpoint: state dict of the original generator, with keys such as
            ``input_conv.*``, ``upsamples.{i}.1.*``,
            ``blocks.{i}.convs{1,2}.{j}.1.*`` and ``output_conv.1.*``.
        hf_model: the HF vocoder instance to populate in place.
        config: its configuration; only the layer counts are read from it.

    NOTE(review): the target attribute paths (conv_pre / upsampler /
    resblocks / conv_post) follow SpeechT5HifiGan's module naming — confirm
    against the modeling file.
    """
    # Weight norm must be applied so the `weight_g` / `weight_v` params exist.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    # One residual block per (upsample layer, resblock kernel size) pair.
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original HiFi-GAN vocoder checkpoint to the HF format.

    Args:
        checkpoint_path: path to the original torch checkpoint.
        stats_path: path to the ``stats.npy`` file holding (mean, scale).
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional path to an HF config.json; defaults otherwise.
        repo_id: if set, the converted model is pushed to this hub repo.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    # stats.npy stores the spectrogram normalization statistics:
    # row 0 is the mean, row 1 the scale.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: converts an original HiFi-GAN vocoder checkpoint into
    # the HF SpeechT5 HiFi-GAN format, optionally pushing it to the hub.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase :Tuple = logging.get_logger(__name__)
class _lowerCamelCase( BaseImageProcessor ):
    r"""
    Image processor implementing the classic pipeline:
    resize (shortest edge) -> center crop -> rescale -> normalize,
    returning a ``pixel_values`` batch.
    """

    # Name of the tensor the model consumes; read by the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        # A `shortest_edge` size must not be coerced into a square H/W dict.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge of the image equals ``size['shortest_edge']``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to ``(size['height'], size['width'])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize the image with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch of images.

        Per-call arguments override the values stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase : int = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce `videos` into batch form: a list of videos, each a list of frames.

    Accepts a batch of videos, a single video (list of frames), or a single
    frame, and always returns the doubly-nested form. Raises ``ValueError``
    for anything else.
    """
    # already a batch of videos (list of lists of frames)
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    # a single video (list of frames) -> wrap into a batch of one
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    # a single frame -> one video containing one frame
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class __snake_case ( BaseImageProcessor ):
    r"""
    Video processor: converts one or more videos (lists of frames) into a batch
    of ``pixel_values`` by resizing, center-cropping, rescaling (optionally with
    an offset) and normalizing every frame.
    """

    # Name of the tensor the model consumes; read by the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame to `size` (either a `shortest_edge` or explicit H/W)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["""shortest_edge"""], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to ``(size['height'], size['width'])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`; with `offset`, shift them first.

        NOTE(review): the ``image - (scale / 2)`` offset is preserved from the
        original implementation — confirm against the model's expected range.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the full pipeline to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""" )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one video or a batch of videos into ``pixel_values``.

        Per-call arguments override the values stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        if not valid_images(videos):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"""pixel_values""": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 20 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowercase : str = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
lowercase : Dict = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
lowercase : int = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels (as a float)."""
    return float((preds == labels).mean() )
def acc_and_fa(preds, labels):
    """Accuracy together with the (binary) F1 score, as plain floats."""
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation coefficients between preds and labels."""
    pearson_corr = float(pearsonr(preds, labels)[0] )
    spearman_corr = float(spearmanr(preds, labels)[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """GLUE metric: dispatches to accuracy / F1 / correlation per subset.

    `datasets.Metric` requires the `_info` / `_compute` hooks, so those names
    must be used exactly.
    """

    def _info(self):
        # Fail fast on an unknown GLUE subset name.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
        # stsb is a regression task, hence float values instead of int labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
                    """references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
                } ),
            codebase_urls=[],
            reference_urls=[],
            format="""numpy""",
        )

    def _compute(self, predictions, references):
        """Compute the metric(s) for the configured GLUE subset."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 20 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> list of public symbols.
# `_LazyModule` (installed at the bottom) looks this variable up.
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the modeling classes are not exported.
    pass
else:
    _import_structure['modeling_nllb_moe'] = [
        'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NllbMoeForConditionalGeneration',
        'NllbMoeModel',
        'NllbMoePreTrainedModel',
        'NllbMoeTop2Router',
        'NllbMoeSparseMLP',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,  # was misspelled `NllbMoeTopaRouter`, diverging from the declared export list
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 242 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase =logging.getLogger(__name__)
class __magic_name__ ( RagRetriever ):
    """
    Distributed (multi-worker) RAG retriever: the index lives only on the main
    (rank 0) worker; other workers send their query vectors to rank 0 via
    ``gather`` and receive retrieval results back via ``scatter`` over a
    dedicated gloo process group.
    """

    def __init__( self , snake_case , snake_case_a , snake_case_b , snake_case_c=None) -> None:
        '''Build the retriever; retrieval itself is initialized lazily via `init_retrieval`.'''
        super().__init__(
            snake_case , question_encoder_tokenizer=snake_case_a , generator_tokenizer=snake_case_b , index=snake_case_c , init_retrieval=False , )
        # Process group used for all retrieval collectives; set in init_retrieval().
        self.process_group = None

    def init_retrieval( self , distributed_port) -> None:
        '''Create the gloo process group and, on rank 0 only, load the index.'''
        logger.info('initializing retrieval')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized')
            # needs to be set manually
            # NOTE(review): env var name reconstructed from the surrounding comment — confirm
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None , backend='gloo')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main( self) -> bool:
        '''True on the rank-0 worker of the retrieval process group.'''
        return dist.get_rank(group=self.process_group) == 0

    def _scattered( self , scatter_list , target_shape , target_type=torch.float32) -> torch.Tensor:
        '''Receive this worker's slice of a tensor scattered from rank 0.'''
        target_tensor = torch.empty(target_shape , dtype=target_type)
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group)
        return target_tensor

    def _infer_socket_ifname( self) -> Optional[str]:
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e')) , None)
        return ifname

    def retrieve( self , question_hidden_states , n_docs) -> Tuple[np.ndarray, List[dict]]:
        '''Retrieve `n_docs` documents per query, gathering/scattering across workers.'''
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states) , dst=0 , gather_list=gather_list , group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list).numpy() , n_docs)
            ids , vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids , n_queries)
            scatter_vectors = self._chunk_tensor(vectors , n_queries)
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 242 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( DiffusionPipeline ):
    """Unconditional audio generation pipeline: a 1D UNet denoised by a scheduler."""

    def __init__( self , unet , scheduler ) ->None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 100 , generator = None , audio_length_in_s = None , return_dict = True , ) ->Union[AudioPipelineOutput, Tuple]:
        """Generate `batch_size` audio clips of (roughly) `audio_length_in_s` seconds."""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the length once per up-block, so the sample length
        # must be a (large enough) multiple of this factor.
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )

        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple; the surplus is trimmed after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ''' process.''' )
        sample_size = int(sample_size )

        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample

        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        # Trim the padding added above to satisfy the downsampling factor.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
| 313 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
    """Speech feature extractor that turns raw mono audio into log-mel
    filter-bank (MFSC) features plus an attention mask.

    NOTE(review): this block is machine-mangled. All three helper methods are
    named ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), every
    parameter is named ``snake_case`` (duplicate parameter names are a
    SyntaxError), and assignment targets were collapsed to ``A_`` even though
    later code reads ``self.feature_size``, ``self.sample_size``,
    ``self._extract_mfsc_features``, ``self._normalize_one`` etc.  The
    comments below record what each statement evidently did; restore the real
    identifiers from the upstream source before running this.
    """

    # Presumably the upstream `model_input_names` list — TODO confirm.
    __UpperCamelCase = ['''input_features''', '''attention_mask''']
    # Judging by the attribute reads below, the positional parameters were:
    # feature_size=80, sampling_rate=16_000, padding_value=0.0, hop_length=10,
    # win_length=25, win_function="hamming_window", frame_signal_scale=32768.0,
    # preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True,
    # normalize_vars=True, return_attention_mask=False — TODO confirm.
    def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ):
        '''Store the spectrogram configuration and precompute window/FFT sizes.'''
        super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
        # NOTE(review): each `A_ : T = x` below was presumably `self.x = x`.
        A_ : Union[str, Any] = feature_size
        A_ : int = sampling_rate
        A_ : str = padding_value
        A_ : int = hop_length
        A_ : List[str] = win_length
        A_ : Any = frame_signal_scale
        A_ : str = preemphasis_coeff
        A_ : List[str] = mel_floor
        A_ : str = normalize_means
        A_ : Any = normalize_vars
        A_ : Optional[Any] = win_function
        A_ : Dict = return_attention_mask
        # Window / hop sizes converted from milliseconds to samples;
        # presumably `self.sample_size` and `self.sample_stride`.
        A_ : List[str] = win_length * sampling_rate // 1_000
        A_ : List[str] = hop_length * sampling_rate // 1_000
        # FFT length padded to an efficient size, and the resulting number of
        # frequency bins; presumably `self.n_fft` and `self.n_freqs`.
        A_ : List[str] = optimal_fft_length(self.sample_size )
        A_ : str = (self.n_fft // 2) + 1
    # Single-waveform featurizer; called below as `self._extract_mfsc_features`.
    def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ):
        '''Return the (frames, feature_size) log-mel spectrogram of one waveform.'''
        # "hamming_window" uses a symmetric (non-periodic) window; other window
        # names fall back to the default `window_function` behaviour.
        if self.win_function == "hamming_window":
            A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
        else:
            A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function )
        # Mel filter bank spanning 0 Hz up to the Nyquist frequency.
        A_ : Optional[int] = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        A_ : Tuple = spectrogram(
            one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , )
        # Transpose so time frames come first: (frames, features).
        return msfc_features.T
    # Per-example normalizer; called below as `self._normalize_one(x, n, padding_value)`.
    def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ):
        '''Mean/variance-normalize the first `input_length` frames, re-pad the rest.'''
        if self.normalize_means:
            A_ : int = x[:input_length].mean(axis=0 )
            A_ : Any = np.subtract(snake_case , snake_case )
        if self.normalize_vars:
            A_ : List[Any] = x[:input_length].std(axis=0 )
            A_ : Optional[int] = np.divide(snake_case , snake_case )
        if input_length < x.shape[0]:
            # NOTE(review): presumably `x[input_length:] = padding_value` upstream.
            A_ : Optional[int] = padding_value
        # make sure array is in float32
        # NOTE(review): `np.floataa` looks mangled (digits replaced) — presumably np.float32.
        A_ : Union[str, Any] = x.astype(np.floataa )
        return x
    # Batch normalizer; called below as `self.normalize(...)`.
    def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ):
        '''Normalize each feature array, using the attention mask for true lengths.'''
        A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )]
    def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case :Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ):
        '''Featurize, pad and (optionally) normalize a batch of raw speech.'''
        # Guard against featurizing audio recorded at the wrong sampling rate.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # Accept a single waveform, a batched ndarray, or a list of waveforms.
        A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        A_ : Optional[Any] = is_batched_numpy or (
            isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(snake_case , np.ndarray ):
            A_ : int = np.asarray(snake_case , dtype=np.floataa )
        elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            A_ : Optional[int] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            A_ : Tuple = [raw_speech]
        # extract fbank features
        A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
        # convert into correct format for padding
        A_ : Union[str, Any] = BatchFeature({"input_features": features} )
        A_ : str = self.pad(
            snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
        # make sure list is in array format
        A_ : Optional[int] = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , snake_case ):
            A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
        A_ : Dict = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
        # Only keep the attention mask for normalization when real padding was applied.
        if self.normalize_means or self.normalize_vars:
            A_ : Dict = (
                np.array(snake_case , dtype=np.intaa )
                if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            A_ : Optional[int] = self.normalize(
                padded_inputs["input_features"] , attention_mask=snake_case )
        if return_tensors is not None:
            A_ : Dict = padded_inputs.convert_to_tensors(snake_case )
        return padded_inputs
| 300 | 0 |
"""simple docstring"""
def _lowercase ( __snake_case ) -> int:
__lowerCAmelCase : Any = [1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = 0, 0, 0
__lowerCAmelCase : int = ugly_nums[ia] * 2
__lowerCAmelCase : Tuple = ugly_nums[ia] * 3
__lowerCAmelCase : Optional[int] = ugly_nums[ia] * 5
for _ in range(1 ,__snake_case ):
__lowerCAmelCase : List[str] = min(__snake_case ,__snake_case ,__snake_case )
ugly_nums.append(__snake_case )
if next_num == next_a:
ia += 1
__lowerCAmelCase : List[str] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__lowerCAmelCase : List[Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__lowerCAmelCase : Optional[int] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""") | 58 |
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 58 | 1 |
"""simple docstring"""
def lowerCamelCase(number: int, number_of_terms: int) -> str:
    """Render a multiplication table for *number* as a newline-joined string.

    The original signature declared the same mangled name for both parameters
    (a SyntaxError) while the body and the call site below use `number` and
    `number_of_terms`; those names are restored here.

    Args:
        number: The number whose table is produced.
        number_of_terms: How many rows (`number * 1` … `number * number_of_terms`);
            zero or negative yields an empty string.

    Returns:
        Lines of the form ``"{number} * {i} = {number * i}"`` joined by ``"\\n"``.
    """
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 115 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 115 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _A :
    """Helper ("model tester") that builds tiny TFFunnel configs/inputs and
    runs shape checks for each task head; referenced below as
    `TFFunnelModelTester`.

    NOTE(review): this block is machine-mangled — every method is named
    ``__snake_case`` (later defs shadow earlier ones), `__init__`/method
    parameters all share the name ``__UpperCAmelCase`` (duplicate parameter
    names are a SyntaxError), and assignment targets were collapsed to ``a``
    even though later code reads ``self.batch_size``, ``self.d_model`` etc.
    Judging by those reads, each `a : T = x` in `__init__` was `self.x = x`
    and each `a : T = Model(...)` in the check methods bound `model`/`inputs`/
    `result`. Restore the real identifiers from upstream before running.
    """

    def __init__( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any]=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : str=[1, 1, 2] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : int=32 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : List[Any]=8 , __UpperCAmelCase : Optional[int]=37 , __UpperCAmelCase : Tuple="gelu_new" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : List[Any]=512 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Tuple=False , ):
        a : Any = parent
        a : Tuple = batch_size
        a : List[str] = seq_length
        a : Any = is_training
        a : Union[str, Any] = use_input_mask
        a : Dict = use_token_type_ids
        a : Tuple = use_labels
        a : Union[str, Any] = vocab_size
        a : str = block_sizes
        a : str = num_decoder_layers
        a : List[Any] = d_model
        a : Optional[Any] = n_head
        a : Optional[Any] = d_head
        a : Any = d_inner
        a : Optional[int] = hidden_act
        a : List[Any] = hidden_dropout
        a : Dict = attention_dropout
        a : Tuple = activation_dropout
        a : int = max_position_embeddings
        a : Optional[int] = type_vocab_size
        # presumably `self.type_sequence_label_size = 2` (read below in
        # prepare_config_and_inputs).
        a : str = 2
        a : Optional[int] = num_labels
        a : List[Any] = num_choices
        a : List[Any] = scope
        a : str = initializer_std
        # Used in the tests to check the size of the first attention layer
        a : Tuple = n_head
        # Used in the tests to check the size of the first hidden state
        a : Union[str, Any] = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        a : str = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            a : str = self.num_hidden_layers + 2
    # Builds a small FunnelConfig plus random ids/masks/labels for the checks below.
    def __snake_case ( self : Union[str, Any]):
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Dict = None
        if self.use_input_mask:
            a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
        a : Tuple = None
        if self.use_token_type_ids:
            a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a : Any = None
        a : Tuple = None
        a : Optional[Any] = None
        if self.use_labels:
            a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a : int = ids_tensor([self.batch_size] , self.num_choices)
        a : Any = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # Checks TFFunnelModel output shapes for dict, list and keyword calling
    # conventions, with token_type_ids / input_mask toggled off.
    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : str , ):
        a : Optional[int] = TFFunnelModel(config=__UpperCAmelCase)
        a : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : int = model(__UpperCAmelCase)
        a : List[str] = [input_ids, input_mask]
        a : Optional[int] = model(__UpperCAmelCase)
        a : List[Any] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
        a : Optional[int] = False
        a : Dict = TFFunnelModel(config=__UpperCAmelCase)
        a : Any = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
        a : List[str] = False
        a : Optional[Any] = TFFunnelModel(config=__UpperCAmelCase)
        a : int = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
    # Same as above but for TFFunnelBaseModel (pooled 2- or 3-position output).
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , ):
        a : str = TFFunnelBaseModel(config=__UpperCAmelCase)
        a : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : Optional[int] = model(__UpperCAmelCase)
        a : int = [input_ids, input_mask]
        a : Any = model(__UpperCAmelCase)
        a : Any = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
        a : List[Any] = False
        a : int = TFFunnelBaseModel(config=__UpperCAmelCase)
        a : List[str] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
        a : Union[str, Any] = False
        a : Optional[int] = TFFunnelBaseModel(config=__UpperCAmelCase)
        a : List[Any] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
    # One shape check per task head below: pretraining, masked LM,
    # sequence/multiple-choice/token classification and QA.
    def __snake_case ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , ):
        a : Tuple = TFFunnelForPreTraining(config=__UpperCAmelCase)
        a : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : Optional[Any] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
    def __snake_case ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Any , ):
        a : Union[str, Any] = TFFunnelForMaskedLM(config=__UpperCAmelCase)
        a : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : Dict = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def __snake_case ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , ):
        a : Union[str, Any] = self.num_labels
        a : List[Any] = TFFunnelForSequenceClassification(config=__UpperCAmelCase)
        a : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : Union[str, Any] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , ):
        a : Dict = self.num_choices
        a : Optional[int] = TFFunnelForMultipleChoice(config=__UpperCAmelCase)
        a : str = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
        a : Optional[Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
        a : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
        a : Optional[int] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        a : List[str] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , ):
        a : Union[str, Any] = self.num_labels
        a : Union[str, Any] = TFFunnelForTokenClassification(config=__UpperCAmelCase)
        a : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : Optional[int] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , ):
        a : Dict = TFFunnelForQuestionAnswering(config=__UpperCAmelCase)
        a : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a : int = model(__UpperCAmelCase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    # Repackages prepare_config_and_inputs() output as (config, inputs_dict)
    # for the common test mixin.
    def __snake_case ( self : Any):
        a : int = self.prepare_config_and_inputs()
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) : int = config_and_inputs
        a : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
    """Full TFFunnel model test suite (non-base variants).

    NOTE(review): the base-class names were mangled to `_a`; judging by the
    otherwise-unused imports above, they are presumably `TFModelTesterMixin`
    and `PipelineTesterMixin` — confirm. The class-level attributes below all
    collide on the mangled name `UpperCAmelCase` (only the last assignment of
    each name survives); upstream they were presumably `all_model_classes`,
    `pipeline_model_mapping`, `test_head_masking`, `test_onnx` — confirm.
    """

    UpperCAmelCase : Optional[int] = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase : Optional[int] = (
        {
            """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
            """fill-mask""": TFFunnelForMaskedLM,
            """question-answering""": TFFunnelForQuestionAnswering,
            """text-classification""": TFFunnelForSequenceClassification,
            """token-classification""": TFFunnelForTokenClassification,
            """zero-shot""": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase : List[str] = False
    UpperCAmelCase : Any = False
    def __snake_case ( self : Dict):
        # NOTE(review): `TFFunnelModelTester` is the model-tester class defined
        # above (mangled to `_A` in this file).
        a : Optional[int] = TFFunnelModelTester(self)
        a : str = ConfigTester(self , config_class=__UpperCAmelCase)
    def __snake_case ( self : List[Any]):
        self.config_tester.run_common_tests()
    def __snake_case ( self : Optional[int]):
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase)
    def __snake_case ( self : Tuple):
        a : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase)
    def __snake_case ( self : List[Any]):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase)
    def __snake_case ( self : Tuple):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
    def __snake_case ( self : List[Any]):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@require_tf
class _A ( _a ,unittest.TestCase ):
    """Test suite for the base (no-decoder) TFFunnel variants.

    NOTE(review): same mangling as the class above — base `_a` is presumably
    `TFModelTesterMixin`, and the colliding `UpperCAmelCase` class attributes
    presumably `all_model_classes`, `test_head_masking`, `test_onnx` — confirm.
    """

    UpperCAmelCase : List[Any] = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    UpperCAmelCase : Tuple = False
    UpperCAmelCase : Dict = False
    def __snake_case ( self : Union[str, Any]):
        # base=True switches the tester to the 2/3-position pooled outputs.
        a : Optional[Any] = TFFunnelModelTester(self , base=__UpperCAmelCase)
        a : int = ConfigTester(self , config_class=__UpperCAmelCase)
    def __snake_case ( self : Optional[int]):
        self.config_tester.run_common_tests()
    def __snake_case ( self : Tuple):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*__UpperCAmelCase)
    def __snake_case ( self : List[str]):
        a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any]):
        a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase)
| 226 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any variant name in `attributes` is used by a modeling
    file, or if the attribute is explicitly allowed.

    NOTE(review): the mangled original named this `lowercase` with four
    parameters all called `A_` (a SyntaxError); the name and signature are
    restored from the call site (`check_attribute_being_used(...)`) and the
    body's reads of `config_class`, `attributes`, `default_value`,
    `source_strings`.

    Args:
        config_class: Configuration class owning the attribute (only its
            `__name__` is consulted, for `SPECIAL_CASES_TO_ALLOW` lookups).
        attributes: All variant names of the attribute (e.g. via `attribute_map`).
        default_value: The attribute's default in the config `__init__`.
        source_strings: Source text of every `modeling_*.py` in the model dir.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class.__init__` parameters that are
    never used in the corresponding `modeling_*.py` files.

    NOTE(review): name and local identifiers restored from the mangled
    original (`lowercase(A_)`) using its own reads — `parameter_names`,
    `reversed_attribute_map`, `modeling_paths`, `modeling_sources`,
    `unused_attributes` — and the call site below.
    """
    # All `__init__` parameters except `self`/`kwargs`, with their defaults.
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Scan every config class in `CONFIG_MAPPING` (skipping deprecated models)
    and raise ValueError listing configs with unused `__init__` attributes.

    NOTE(review): name restored from the `__main__` call below; the mangled
    original declared all three helpers in this file as `lowercase`.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 226 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCamelCase ( number ) -> int:
    """Return ``number + 2`` if ``(number, number + 2)`` form a twin-prime pair, else ``-1``.

    Raises:
        TypeError: if ``number`` is not an ``int``.
    """
    # The obfuscated original took `lowerCAmelCase_` but read the undefined
    # name `number`, and called `isinstance(x, x)`; both are fixed here.
    if not isinstance(number, int):
        _a : List[str] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(_a)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Self-test: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 89 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCAmelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def UpperCAmelCase_ ( test_results ):
    """Parse a pytest summary string such as ``"2 failed, 3 passed in 0:01:30"``.

    Args:
        test_results: the raw summary line produced by pytest.

    Returns:
        Tuple ``(failed, success, time_spent)`` where the counts are ints and
        ``time_spent`` is the raw duration token.
    """
    # The obfuscated original read the undefined names `test_results` /
    # `expressions`; the parameter and locals are restored here.
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def UpperCAmelCase_ ( failures_short_lines ):
    """Extract the first error line of each doctest failure from a failures-short report.

    Args:
        failures_short_lines: newline-joined contents of pytest's short failure report.

    Returns:
        Dict mapping the failing test identifier (third whitespace-separated token
        of the ``_ [doctest]`` header line) to the first non-numeric error line.
    """
    # The obfuscated original iterated the undefined `failures_short_lines`,
    # ran the regex on the wrong variable and never populated the dict.
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class A_ :
    """Builds and posts a Slack report summarizing doc-test results.

    NOTE(review): this obfuscated copy is not runnable as written — `__init__`
    and the reply-block builder declare duplicate `snake_case` parameters (a
    SyntaxError), every method shares the name `SCREAMING_SNAKE_CASE__` (later
    definitions shadow earlier ones), and bodies bind locals to `lowercase`
    while reading the original names (`time_spent`, `blocks`, `failures`, ...).
    The comments below describe the intended behavior of each method.
    """
    def __init__( self , snake_case , snake_case ):
        # Intended signature: (title, doc_test_results). Caches the report
        # title, the first duration token, and the success/failure counts.
        lowercase = title
        lowercase = doc_test_results['time_spent'].split(',' )[0]
        lowercase = doc_test_results['success']
        lowercase = doc_test_results['failures']
        lowercase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        lowercase = doc_test_results
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `time` property: sums per-suite durations and formats "XhYmZs".
        lowercase = [self._time_spent]
        lowercase = 0
        for time in time_spent:
            lowercase = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(snake_case ) == 1:
                lowercase = [0, 0, time_parts[0]]
            lowercase , lowercase , lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        lowercase , lowercase , lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(snake_case )}h{int(snake_case )}m{int(snake_case )}s'''
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `header` property: Slack "header" block carrying the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `no_failures` property: green-path section block with a results link.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `failures` property: summary section block with the failure count.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `category_failures` property: markdown block listing failed tests per category.
        lowercase = 40
        lowercase = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(snake_case , snake_case )}
        lowercase = ''
        for category, failures in category_failures.items():
            if len(snake_case ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(snake_case )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `payload` property: assembles the full block list and serializes it.
        lowercase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(snake_case )
    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( ):
        # `error_out`: posts a generic "tests could not run" message.
        # NOTE(review): references `snake_case` although this static method takes
        # no parameters — broken by the obfuscation.
        lowercase = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
                },
            }
        ]
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(snake_case )} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=snake_case , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `post`: sends the main report message and stores the thread handle.
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        lowercase = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
        lowercase = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=snake_case , )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
        # `get_reply_blocks`: builds the per-job reply (header + text + failure list).
        # Values longer than 250 chars are truncated to 200 chars plus a marker.
        lowercase = ''
        for key, value in failures.items():
            lowercase = value[:200] + ' [Truncated]' if len(snake_case ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        lowercase = job_name
        lowercase = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            lowercase = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def SCREAMING_SNAKE_CASE__ ( self ):
        # `post_reply`: posts one threaded reply per failing job under the main message.
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        lowercase = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        lowercase = sorted(self.doc_test_results.items() , key=lambda snake_case : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                lowercase = F'''*Num failures* :{len(job_result['failed'] )} \n'''
                lowercase = job_result['failures']
                lowercase = self.get_reply_blocks(snake_case , snake_case , snake_case , text=snake_case )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'''Results for {job}''' , blocks=snake_case , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def UpperCAmelCase_ ( ):
    """Fetch ``{job name: html_url}`` for every job of the current GitHub Actions run.

    Reads ``GITHUB_RUN_ID`` from the environment and pages through the GitHub
    API 100 jobs at a time. Returns ``{}`` on any error.
    """
    # The obfuscated original read the undefined names `run_id` and `url`.
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # Remaining pages beyond the first 100 jobs.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''').json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Read every file under the given artifact directory into a dict.

    Args:
        __SCREAMING_SNAKE_CASE: path to the artifact directory.

    Returns:
        ``{filename-without-extension: file contents}``; empty if the path
        does not exist.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    # The obfuscated original returned `_artifact` without ever defining or
    # populating it; the dict bookkeeping is restored here.
    _artifact = {}
    if os.path.exists(__SCREAMING_SNAKE_CASE):
        files = os.listdir(__SCREAMING_SNAKE_CASE)
        for file in files:
            try:
                with open(os.path.join(__SCREAMING_SNAKE_CASE, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(__SCREAMING_SNAKE_CASE, file)}.''') from e
    return _artifact
def UpperCAmelCase_ ( ):
    """Scan the current working directory and return ``{dir name: Artifact}``.

    Every immediate subdirectory is treated as one downloaded CI artifact.
    """

    class Artifact:
        """A named artifact plus the list of paths it was found at."""

        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    # The obfuscated original created the Artifact but never stored it in the
    # dict, so the subsequent `add_path` lookup raised KeyError.
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    # Driver: collect job links and artifacts, parse the doc-test report, then
    # post a Slack summary plus threaded replies.
    # NOTE(review): this obfuscated copy calls `get_job_links`,
    # `retrieve_available_artifacts`, `handle_test_results`,
    # `extract_first_line_failure` and `Message` — names that the definitions
    # above were renamed away from — so it raises NameError as written.
    UpperCAmelCase = get_job_links()
    UpperCAmelCase = retrieve_available_artifacts()
    # Map from file glob pattern to report category.
    UpperCAmelCase = collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    UpperCAmelCase = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    UpperCAmelCase = github_actions_job_links.get('''run_doctests''')
    UpperCAmelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    UpperCAmelCase = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        # Overall counts and duration from the pytest summary line.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = handle_test_results(artifact['''stats'''])
        UpperCAmelCase = failed
        UpperCAmelCase = success
        UpperCAmelCase = time_spent[1:-1] + ''', '''
        UpperCAmelCase = extract_first_line_failure(artifact['''failures_short'''])
        # Attribute each FAILED line of the short summary to its category.
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                UpperCAmelCase = line.replace('''FAILED ''', '''''')
                UpperCAmelCase = line.split()[0].replace('''\n''', '''''')
                if "::" in line:
                    UpperCAmelCase , UpperCAmelCase = line.split('''::''')
                else:
                    UpperCAmelCase , UpperCAmelCase = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        UpperCAmelCase = all_failures[test] if test in all_failures else '''N/A'''
                        UpperCAmelCase = failure
                        break
    UpperCAmelCase = Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 195 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def SCREAMING_SNAKE_CASE ( ode_func , y0 , x0 , step_size , x_end) -> np.array:
    """Integrate ``y' = ode_func(x, y)`` with Heun's method (explicit trapezoidal RK2).

    Args:
        ode_func: right-hand side ``f(x, y)`` of the ODE.
        y0: initial value ``y(x0)``.
        x0: initial abscissa.
        step_size: fixed integration step ``h``.
        x_end: final abscissa (integration stops at or just past it).

    Returns:
        Array of ``n + 1`` solution values, ``y[0] == y0``.
    """
    # The obfuscated original declared five parameters with the same name (a
    # SyntaxError) and collapsed `n`, `y`, `x` and the predictor into one local.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: one explicit Euler step.
        predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Self-test: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 368 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> list[int]:
    """Return the prime factorization of the input in ascending order.

    E.g. 12 -> [2, 2, 3]; 1 -> []; a prime p -> [p].
    """
    # The obfuscated original bound `i`, `factors` and the input all to `a`,
    # leaving `i`/`n`/`factors` undefined; the trial-division locals are restored.
    n = __UpperCamelCase
    i = 2
    factors: list[int] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Whatever remains greater than 1 is itself prime.
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    # Self-test: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 180 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _a ( __a ):
    """Configuration class for the FNet model (obfuscated copy of ``FNetConfig``).

    Stores hyper-parameters such as vocabulary size, hidden size and layer
    count; token-id defaults are forwarded to the base configuration.
    """

    # NOTE(review): presumably the original class attribute was `model_type`;
    # the obfuscation renamed it to `__a` — TODO confirm against upstream.
    __a : str = """fnet"""

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        # The obfuscated original named every parameter `lowercase`, which is a
        # SyntaxError (duplicate argument); the parameter names are restored to
        # match the attributes assigned below.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 34 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (v , l , r , key ):  # noqa: E741
    """Binary search: smallest index in ``(l, r]`` with ``v[index] >= key``.

    ``v`` must be sorted ascending on that range; ``l`` may be -1 as an
    exclusive lower sentinel.
    """
    # The obfuscated original named all four parameters `_a` (a SyntaxError);
    # restored to the (v, l, r, key) the body reads.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def snake_case_ (v: list[int] ):
    """Length of the longest strictly increasing subsequence of ``v`` in O(n log n).

    ``tail[k]`` holds the smallest possible tail of an increasing subsequence
    of length ``k + 1`` seen so far.
    """

    def _ceil_index(tail, left, right, key):
        # Smallest index in (left, right] with tail[index] >= key.
        while right - left > 1:
            middle = (left + right) // 2
            if tail[middle] >= key:
                right = middle
            else:
                left = middle
        return right

    # The obfuscated original bound every local to `UpperCAmelCase` while
    # reading `tail`/`length`/`v`, and dropped the ceil-index replacement in
    # the final branch; both are restored (helper inlined so this block is
    # self-contained).
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh length-1 candidate.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail >= v[i] to keep tails minimal.
            tail[_ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
    # Self-test: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 34 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( AbstractFileSystem ):
    """Read-only fsspec filesystem over a legacy Hugging Face Hub dataset repo.

    Lists files/directories from ``repo_info.siblings`` and opens files via
    ``hf_hub_url``. The obfuscated original declared duplicate
    ``_lowerCamelCase`` parameters (a SyntaxError), referenced the undefined
    name ``a__`` throughout, collapsed the four methods to one name
    (``__lowerCAmelCase``) and used an undefined base ``__a``; the fsspec
    method/attribute names are restored (call sites in the original body, e.g.
    ``self._get_dirs()``, already used them).
    """

    # fsspec protocol metadata.
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        # Lazily-built {path: entry-dict} index over the repo's files.
        self.dir_cache = None

    def _get_dirs(self):
        """Populate ``self.dir_cache`` from the repo's sibling file list (once)."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file (excluding ".").
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        """Open one file of the repo via its hub URL; dataset repos only."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """Return the cached entry for ``path`` or raise FileNotFoundError."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of ``path`` (names, or entry dicts if detail)."""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 359 |
import math
from collections.abc import Iterator
from itertools import takewhile
def UpperCAmelCase_( a__ ):
    """Return True iff ``a__`` is prime (trial division over 6k±1 candidates)."""
    # The obfuscated original read the undefined name `number`; the parameter
    # `a__` is used consistently here.
    if 1 < a__ < 4:
        # 2 and 3 are primes
        return True
    elif a__ < 2 or a__ % 2 == 0 or a__ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(a__) + 1), 6):
        if a__ % i == 0 or a__ % (i + 2) == 0:
            return False
    return True
def UpperCAmelCase_( ):
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    NOTE(review): relies on a module-level ``is_prime``; in this obfuscated
    copy the primality test above was renamed away from that name — TODO
    confirm the intended binding.
    """
    # The obfuscated original yielded the unbound name `num`; the counter is
    # initialized and incremented consistently here.
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def UpperCAmelCase_( a__ = 2_000_000 ):
    """Project Euler 10: sum of all primes strictly below ``a__``.

    NOTE(review): relies on a module-level ``prime_generator``; in this
    obfuscated copy the generator above was renamed away from that name —
    TODO confirm the intended binding.
    """
    # The obfuscated original's lambda read the undefined names `x` and `n`.
    return sum(takewhile(lambda p: p < a__, prime_generator()))
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file —
    # the Euler-10 solver above was renamed to `UpperCAmelCase_` — TODO confirm.
    print(F"{solution() = }")
| 19 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE_ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> int:
    """Tokenize a single text line with fixed-length padding on one side.

    Args:
        tokenizer: HF tokenizer to apply.
        line: raw text line.
        max_length: truncation/padding length.
        padding_side: "left" or "right"; set on the tokenizer before the call.
        pad_to_max_length: pad to ``max_length`` when True.
        return_tensors: framework key forwarded to the tokenizer.
    """
    # The obfuscated original declared six parameters all named
    # `__magic_name__` (a SyntaxError) and compared `isinstance(x, x)`.
    # BART tokenizers need add_prefix_space unless the line already starts with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def SCREAMING_SNAKE_CASE_ ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop trailing columns of a batch that are padding in every row.

    Args:
        input_ids: 2-D token-id tensor (batch, seq).
        pad_token_id: id counted as padding.
        attention_mask: optional mask trimmed with the same columns.

    Returns:
        The trimmed ids, or ``(ids, mask)`` when a mask is given.
    """
    # The obfuscated original declared all parameters as `__magic_name__`
    # (a SyntaxError); names restored from the body's read sites.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _SCREAMING_SNAKE_CASE ( _a ):
    # Seq2seq line-aligned dataset: reads parallel "<type_path>.source" /
    # "<type_path>.target" files and tokenizes one pair per __getitem__.
    # NOTE(review): not runnable as written — __init__ and collate_fn declare
    # duplicate `__lowerCamelCase` parameters (a SyntaxError) and bind locals
    # to `UpperCamelCase` while reading the original names (type_path,
    # max_source_length, n_obs, batch, ...). Comments describe the intent.
    def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]="train" , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]="" , ):
        # Intended params: (tokenizer, data_dir, max_source_length,
        # max_target_length, type_path, n_obs, src_lang, tgt_lang, prefix).
        super().__init__()
        UpperCamelCase :str = Path(__lowerCamelCase ).joinpath(type_path + """.source""" )
        UpperCamelCase :Dict = Path(__lowerCamelCase ).joinpath(type_path + """.target""" )
        UpperCamelCase :str = self.get_char_lens(self.src_file )
        UpperCamelCase :Tuple = max_source_length
        UpperCamelCase :List[Any] = max_target_length
        assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
        UpperCamelCase :str = tokenizer
        UpperCamelCase :Any = prefix
        if n_obs is not None:
            # Optionally truncate the dataset to the first n_obs examples.
            UpperCamelCase :Dict = self.src_lens[:n_obs]
        UpperCamelCase :int = src_lang
        UpperCamelCase :Optional[int] = tgt_lang
    def __len__( self : List[str] ):
        # One example per source line.
        return len(self.src_lens )
    def __getitem__( self : Union[str, Any] , __lowerCamelCase : Dict ):
        # Reads the index-th source/target lines, tokenizes both to fixed
        # length, and returns squeezed id/mask tensors.
        UpperCamelCase :str = index + 1  # linecache starts at 1
        UpperCamelCase :Optional[int] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCamelCase ).rstrip("""\n""" )
        UpperCamelCase :Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCamelCase ).rstrip("""\n""" )
        assert source_line, F"""empty source line for index {index}"""
        assert tgt_line, F"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCamelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap a question-encoder and a generator tokenizer.
        UpperCamelCase :Tuple = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCamelCase ) else self.tokenizer
        )
        UpperCamelCase :str = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCamelCase ) else self.tokenizer
        UpperCamelCase :Optional[int] = encode_line(__lowerCamelCase , __lowerCamelCase , self.max_source_length , """right""" )
        UpperCamelCase :Union[str, Any] = encode_line(__lowerCamelCase , __lowerCamelCase , self.max_target_length , """right""" )
        UpperCamelCase :Dict = source_inputs["""input_ids"""].squeeze()
        UpperCamelCase :Tuple = target_inputs["""input_ids"""].squeeze()
        UpperCamelCase :int = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def _A ( __lowerCamelCase : Tuple ):
        # get_char_lens: per-line character lengths of a file.
        # NOTE(review): computes len() of the path argument rather than of each
        # line `x` — looks like an obfuscation artifact; TODO confirm upstream.
        return [len(__lowerCamelCase ) for x in Path(__lowerCamelCase ).open().readlines()]
    def _A ( self : Any , __lowerCamelCase : int ):
        # collate_fn: stack example dicts into batch tensors and trim columns
        # that are all-padding via trim_batch.
        UpperCamelCase :Optional[int] = torch.stack([x["""input_ids"""] for x in batch] )
        UpperCamelCase :Tuple = torch.stack([x["""attention_mask"""] for x in batch] )
        UpperCamelCase :Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        UpperCamelCase :List[Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCamelCase )
            else self.tokenizer.pad_token_id
        )
        UpperCamelCase :List[str] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCamelCase )
            else self.tokenizer.pad_token_id
        )
        UpperCamelCase :Union[str, Any] = trim_batch(__lowerCamelCase , __lowerCamelCase )
        UpperCamelCase , UpperCamelCase :str = trim_batch(__lowerCamelCase , __lowerCamelCase , attention_mask=__lowerCamelCase )
        UpperCamelCase :List[str] = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
UpperCAmelCase_ : str = getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[List] ) -> Dict:
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``."""
    flat = []
    for sublist in __magic_name__:
        flat.extend(sublist)
    return flat
def SCREAMING_SNAKE_CASE_ ( __magic_name__ ) -> None:
    """Write metadata about the current git repo to ``<folder>/git_log.json``.

    Args:
        __magic_name__: destination folder path.
    """
    # The obfuscated original fetched the repo info into a discarded local and
    # then saved the folder path itself; the fetched info is saved instead.
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(__magic_name__, "git_log.json"))
def SCREAMING_SNAKE_CASE_ ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as pretty-printed JSON to ``path``.

    Extra keyword arguments are forwarded to ``json.dump``.
    """
    # The obfuscated original declared duplicate `__magic_name__` parameters
    # (a SyntaxError); names restored from the body's read sites.
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] ) -> Tuple:
    """Read and parse the JSON document stored at the given path."""
    handle = open(__magic_name__)
    try:
        return json.load(handle)
    finally:
        handle.close()
def SCREAMING_SNAKE_CASE_ ( ):
    """Return metadata about the enclosing git repository.

    Returns:
        Dict with ``repo_id``, ``repo_sha``, ``repo_branch`` and ``hostname``.
    """
    # The obfuscated original passed the undefined `__magic_name__` both as
    # `search_parent_directories` and as the repo-id source; True / the repo
    # object are restored.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def SCREAMING_SNAKE_CASE_ ( fn , iterable ):
    """``list(map(fn, iterable))`` — eager map returning a list."""
    # The obfuscated original named both parameters `__magic_name__`
    # (a SyntaxError).
    return list(map(fn, iterable))
def SCREAMING_SNAKE_CASE_ ( obj , path ):
    """Pickle ``obj`` to the file at ``path``."""
    # The obfuscated original named both parameters `__magic_name__`
    # (a SyntaxError).
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ ):
    """SQuAD-style answer normalization: lowercase, strip punctuation and the
    articles a/an/the, and collapse whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    # The obfuscated original's inner helpers took `__magic_name__` but read
    # the undefined name `text`; the helper parameters are restored.
    return white_space_fix(remove_articles(remove_punc(lower(__magic_name__))))
def SCREAMING_SNAKE_CASE_ ( prediction , ground_truth ):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    Relies on the module-level ``normalize_answer`` helper.
    Returns 0 when no tokens overlap.
    """
    # The obfuscated original named both parameters `__magic_name__`
    # (a SyntaxError).
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def SCREAMING_SNAKE_CASE_ ( prediction , ground_truth ):
    """True iff prediction and reference match after SQuAD-style normalization.

    Relies on the module-level ``normalize_answer`` helper.
    """
    # The obfuscated original named both parameters `__magic_name__`
    # (a SyntaxError).
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def SCREAMING_SNAKE_CASE_ ( output_lns , reference_lns ):
    """Corpus-level exact-match rate between parallel output/reference lists.

    Returns ``{"em": fraction-of-exact-matches}``; 0 for empty input.
    """
    # The obfuscated original named both parameters `__magic_name__`
    # (a SyntaxError).
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def SCREAMING_SNAKE_CASE_ ( model_prefix ):
    """True iff the model identifier names a RAG model (prefix "rag")."""
    # The obfuscated original took `__magic_name__` but read the undefined
    # name `model_prefix`; the parameter name is restored from the body.
    return model_prefix.startswith("rag")
def SCREAMING_SNAKE_CASE_ ( extra_params , hparams , config ):
    """Move selected hyper-parameters from ``hparams`` onto ``config``.

    For each name in ``extra_params`` with a truthy value on ``hparams``, set
    the matching config attribute (T5-style configs use ``dropout_rate`` in
    place of ``dropout``) and remove it from ``hparams``. Names the config
    does not know are logged and dropped.

    Returns:
        The (mutated) ``(hparams, config)`` pair.
    """
    # The obfuscated original declared three parameters all named
    # `__magic_name__` (a SyntaxError).
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 38 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
    # MarkupLM-style HTML feature extractor: parses HTML with BeautifulSoup and
    # returns, per document, the text nodes plus the xpath of each node.
    # NOTE(review): not runnable as written — the three helper methods all
    # share the name `_A` (later defs shadow earlier ones) and bodies bind
    # locals to `UpperCamelCase` while reading the original names (`child`,
    # `xpath_tags`, `html_code`, ...). Comments describe the intent.
    def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ):
        # Requires the bs4 backend to be installed.
        requires_backends(self , ["""bs4"""] )
        super().__init__(**__lowerCamelCase )
    def _A ( self : List[str] , __lowerCamelCase : Any ):
        # xpath_soup: walk from a bs4 node up through its parents, collecting
        # the tag name and 1-based sibling subscript at each level (0 when the
        # tag is an only child of its kind); both lists are reversed so they
        # read root-to-leaf.
        UpperCamelCase :Optional[int] = []
        UpperCamelCase :List[str] = []
        UpperCamelCase :Union[str, Any] = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
            UpperCamelCase :Any = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def _A ( self : Any , __lowerCamelCase : Tuple ):
        # get_three_from_single: parse one HTML string and return the list of
        # text nodes together with the per-node xpath tag and subscript lists.
        UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" )
        UpperCamelCase :Union[str, Any] = []
        UpperCamelCase :Tuple = []
        UpperCamelCase :Tuple = []
        for element in html_code.descendants:
            if type(__lowerCamelCase ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                # Skip whitespace-only text nodes.
                UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(__lowerCamelCase )
                UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase )
                stringaxtag_seq.append(__lowerCamelCase )
                stringaxsubs_seq.append(__lowerCamelCase )
        if len(__lowerCamelCase ) != len(__lowerCamelCase ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(__lowerCamelCase ) != len(__lowerCamelCase ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
        # construct_xpath: join tag/subscript pairs into "/tag[n]" xpath syntax
        # (subscript omitted when 0).
        UpperCamelCase :Tuple = """"""
        for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath
    def __call__( self : Any , __lowerCamelCase : Dict ):
        # Accepts one HTML string or a list of them; returns a BatchFeature
        # with "nodes" (text strings) and "xpaths" (one xpath per node).
        UpperCamelCase :Any = False
        # Check that strings has a valid type
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            UpperCamelCase :List[Any] = True
        elif isinstance(__lowerCamelCase , (list, tuple) ):
            if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
                UpperCamelCase :Any = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must of type `str`, `List[str]` (batch of examples), """
                F"""but is of type {type(__lowerCamelCase )}.""" )
        UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
        if not is_batched:
            # Normalize the single-string case to a one-element batch.
            UpperCamelCase :Any = [html_strings]
        # Get nodes + xpaths
        UpperCamelCase :Union[str, Any] = []
        UpperCamelCase :str = []
        for html_string in html_strings:
            UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
            nodes.append(__lowerCamelCase )
            UpperCamelCase :int = []
            for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
                UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
                xpath_strings.append(__lowerCamelCase )
            xpaths.append(__lowerCamelCase )
        # return as Dict
        UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
        UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
        return encoded_inputs
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : float ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
    # Self-test: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 18 | '''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for XLM-ProphetNet (obfuscated copy of ``XLMProphetNetConfig``).

    The original declared every ``__init__`` parameter as
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), gave all three class attributes
    the same name ``__lowercase`` (so only the last survived), used the
    undefined base ``_UpperCamelCase`` although ``PretrainedConfig`` is
    imported above, and named the property ``__A`` while its setter decorator
    referenced ``num_hidden_layers``. All of these are restored here.
    """

    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }

    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=3_0522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> int:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )

    @property
    def num_hidden_layers(self) -> int:
        # Total transformer depth = encoder layers + decoder layers.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value) -> None:
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
| 18 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Drop `n_shave_prefix_segments` dot-separated segments from the front of
    *path*; a negative count drops that many segments from the back instead.

    Renamed from the clobbering placeholder `_A`: the call sites in this file
    (e.g. inside the path-renaming helpers) reference `shave_segments`.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map original LDM resnet parameter names to their diffusers equivalents.

    Returns a list of {"old": original_name, "new": renamed} dicts.
    Restored from the obfuscated form: the body referenced `mapping` and
    `new_item`, which were never bound, and the call sites in this file use the
    name `renew_resnet_paths`.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        # Optionally strip leading dotted segments (e.g. "input_blocks.1.0.").
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map original LDM attention parameter names to their diffusers equivalents.

    Returns a list of {"old": original_name, "new": renamed} dicts.
    Restored from the obfuscated form: `new_item` was never bound before use and
    the call sites in this file use the name `renew_attention_paths`.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from `old_checkpoint` into `checkpoint` under renamed keys.

    Args:
        paths: list of {"old": ..., "new": ...} dicts (from the renew_* helpers).
        checkpoint: destination state dict (mutated in place).
        old_checkpoint: source state dict.
        attention_paths_to_split: optional map of fused-qkv keys to
            {"query"/"key"/"value": target_key} dicts; those tensors are split
            into three heads-first chunks before assignment.
        additional_replacements: optional list of {"old": ..., "new": ...}
            substring substitutions applied to every new key.
        config: model config dict; only ``num_head_channels`` is read, and only
            when splitting fused qkv tensors.

    Restored from the obfuscated form (the isinstance check compared `paths`
    against itself, and no local name was ever bound).
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict into the diffusers layout.

    Args:
        checkpoint: the original state dict (keys like ``input_blocks.N...``).
        config: architecture config dict; ``num_res_blocks`` (and, transitively,
            ``num_head_channels``) are read.

    Returns:
        The converted state dict.

    NOTE(review): local/function names were destroyed by obfuscation and are
    restored here from the visible control flow; the destination key names
    (``time_embedding.linear_1...``, ``conv_in``, ``down_blocks`` etc.) are the
    diffusers UNet naming used by the helpers in this file — confirm against the
    upstream conversion script.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]

        # A bare ".op" conv is a downsampler, not a resnet.
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f'input_blocks.{i}.0', "new": f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f'input_blocks.{i}.1',
                "new": f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    "key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    "query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    "value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    "key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    "query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    "value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group the stripped names by their sub-layer index within the block.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f'output_blocks.{i}.0', "new": f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f'output_blocks.{i}.1',
                    "new": f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        "key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        "query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        "value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        "key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        "query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        "value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert an original LDM checkpoint to diffusers format.
    # NOTE(review): every local here had been rebound to the same `__A` name;
    # the names the later statements reference (parser, args, checkpoint,
    # config, model, ...) are restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        # Best effort: also assemble a full LDM pipeline when the scheduler and
        # VQ-VAE live next to the checkpoint; otherwise save the UNet alone.
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except Exception:  # noqa: E722 — deliberate best-effort fallback
        model.save_pretrained(args.dump_path)
| 164 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): these four module constants were all bound to the same `__A`
# name, clobbering each other; the tokenizer class below references them by the
# names restored here (logger, VOCAB_FILES_NAMES, ...).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1_024,
    "microsoft/speecht5_tts": 1_024,
    "microsoft/speecht5_vc": 1_024,
}
class A(PreTrainedTokenizer):
    """Character-level SpeechT5 tokenizer backed by a SentencePiece model.

    Fixes applied: every method had been renamed to the same ``A__`` placeholder
    (each definition clobbering the previous one, and hiding the hook names the
    base tokenizer dispatches to, e.g. ``_tokenize``); ``__init__`` declared all
    parameters under one duplicated name (a SyntaxError); ``self.`` attribute
    assignments had been dropped. The base class is the file's imported
    ``PreTrainedTokenizer`` (the previous base name was undefined).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload it
        # from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Tokenize `text` into SentencePiece string pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Append EOS; pairs are simply concatenated before the single EOS."""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens (the trailing EOS) and 0 otherwise."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a)) + suffix_ones
        return ([0] * len(token_ids_a)) + ([0] * len(token_ids_b)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 164 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a LevitImageProcessor in tests.

    Fixes applied: this class shared its placeholder name with the test class
    below (which clobbered it) while ``setUp`` references
    ``LevitImageProcessingTester``; ``__init__`` declared every parameter under
    one duplicated name (a SyntaxError) and dropped the ``self.`` assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Defaults mirror the processor's own defaults.
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for LevitImageProcessor.

    Fixes applied: every test method had been renamed to the same
    ``UpperCamelCase`` placeholder (so only the last one survived and none were
    discoverable by unittest); attribute/local assignments had been dropped; the
    first base class name was undefined and is restored to the imported
    ``ImageProcessingSavingTestMixin``.
    """

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 39 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): this binding was previously clobbered — the URL
# map below reassigned the same `a__` name immediately afterwards.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL map; the final `a__` binding is preserved for
# any external references.
a__: Optional[Any] = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a UniSpeech model.

    Fixes applied: ``__init__`` declared every parameter under a single
    duplicated placeholder name (a SyntaxError) while the body read the real
    names; all ``self.`` attribute assignments had been dropped; the base class
    name was undefined and is restored to the imported ``PretrainedConfig``;
    the final property decorated an anonymous name and is restored to
    ``inputs_to_logits_ratio`` (it reduces the conv strides read below).
    """

    model_type = '''unispeech'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Product of all conv strides == downsampling factor of the feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 39 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value of a complete binary minimax game tree.

    Args:
        depth: current depth (0 at the root).
        node_index: index of the current node within its level.
        is_max: True if the current player maximizes, False if they minimize.
        scores: leaf values, length 2**height.
        height: depth at which leaves live (math.log2 of len(scores)).

    Raises:
        ValueError: if depth is negative or scores is empty.

    Fixes: restored the recursive name (`minimax`) that the body and the demo
    below call, and the five distinct parameter names that the original
    signature collapsed into one duplicated placeholder (a SyntaxError).
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    """Demo: run minimax over a fixed score list and print the optimal value.

    Fixes: this definition previously reused the same placeholder name as
    `minimax` above, clobbering it; the guard below calls `main`, which was
    undefined. The maximizing player moves first here.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(F'''Optimal value : {minimax(0, 0, True, scores, height)}''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 80 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): logger and the mapping below were previously bound to the same
# `lowercase_` name (the second assignment clobbered the first), while the
# functions below reference `logger` and `MAPPING`.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF Hubert parameter-name fragment.
# "*" is a placeholder for the transformer layer index.
MAPPING = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the module attribute of `hf_pointer` addressed by the
    dotted path `key`, into the slot selected by `weight_type`
    ("weight"/"weight_g"/"weight_v"/"bias" or None for the tensor itself).

    `full_name` is only used for logging/assertion messages. Restored from the
    clobbering placeholder name: the weight-loading loop in this file calls
    `set_recursively`.
    """
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of a fairseq Hubert checkpoint into `hf_model`.

    Conv feature-extractor tensors are routed through `load_conv_layer`; the
    rest are matched against MAPPING. Unmatched tensors are collected and
    logged. Restored from the clobbering placeholder name; locals are renamed
    back to what the original body referenced (`mapping`, `hf_model`, ...).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned HF models nest the encoder under a "hubert." prefix
                # (except the CTC head).
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""")[-1] == name.split(""".""")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>.(weight|bias)``:
    type_id 0 is the conv itself, type_id 2 a layer norm (only layer 0 when
    group norm is used).  Unrecognised tensors go to ``unused_weights``.
    NOTE(review): reconstructed — the mangled original assigned every target
    to one placeholder instead of the ``feature_extractor`` attributes, and its
    assert messages subscripted ``feature_extractor`` directly (TypeError on
    failure); both fixed here.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq HuBERT checkpoint to a Hugging Face model directory.

    Builds a ``HubertForCTC`` (fine-tuned, with tokenizer/processor derived
    from the fairseq dictionary) or a bare ``HubertModel``, copies the
    weights, and saves everything to ``pytorch_dump_folder_path``.
    NOTE(review): reconstructed from mangled identifiers; the intended
    attribute targets follow the standard HF conversion script.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Attention mask is only meaningful with layer-norm feature extraction.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the conversion script.  The original mangled text
    # assigned both the parser and the parsed args to ``lowercase_`` while the
    # following lines read ``parser``/``args`` — restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 58 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures for the image-processing tests below.  The mangled original
# assigned both to the same placeholder name, so the grayscale overwrote the
# color image; restored to the upstream names (img, gray).
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    """convert_to_negative produces a non-empty negative image."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    """change_contrast returns a PIL RGB image of the expected size."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    """A 9x9 Gaussian kernel has no zero entries."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    """Canny edge detection on the grayscale lena image yields some edges."""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    """Gaussian filtering the grayscale image keeps all pixels non-zero."""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    """Convolving with a Laplace-diagonal kernel produces a non-empty result."""
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(np.uint8)
    assert res.any()
def test_median_filter():
    """A 3x3 median filter of the grayscale image is non-empty."""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    """Sobel filtering yields non-trivial gradient magnitude and direction."""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    """make_sepia with factor 20 leaves every pixel non-zero."""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering produces a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg"):
    """Nearest-neighbour resize to 400x200 produces a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    """Neighbour lookup and the full LBP image both produce usable results."""
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 350 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the MEGA model (standard transformers pattern).
# The mangled original assigned the structure and the module list to the same
# placeholder and never defined ``_import_structure``, which the final
# ``_LazyModule`` call reads — restored here.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds small RegNet configs/inputs for the Flax model tests.

    NOTE(review): reconstructed from mangled identifiers (duplicate ``__a``
    parameters were a SyntaxError); names follow the upstream transformers
    RegNet Flax test file.  The class name is confirmed by its use in
    ``setUp`` below.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Model-level tests for the Flax RegNet implementation.

    NOTE(review): reconstructed from mangled identifiers; the attribute and
    method names follow the upstream transformers Flax RegNet test file.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Nothing to check beyond what ConfigTester covers.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # One extra entry for the embeddings output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO test fixture image used by the integration test.

    The call site in the integration test references ``prepare_img`` by name;
    the mangled original also discarded the opened image into a placeholder
    and returned an undefined ``image`` — both fixed here.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/regnet-y-040 weights."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 194 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = """http://www.mocksite.com/file1.txt"""
_a = """\"text\": [\"foo\", \"foo\"]"""
_a = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    """Minimal stand-in for ``requests``' Response used by ``mock_request``.

    NOTE(review): the mangled original assigned all three class attributes to
    one name (only the last survived); restored to the attribute names the
    download manager actually reads.
    """

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Serve the fixture payload as a single chunk.
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    """Drop-in replacement for ``requests.request`` returning the fixture response.

    The mangled original named ``*args`` and ``**kwargs`` identically, which
    is a SyntaxError — fixed here.
    """
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """DownloadManager.download caches files under <cache>/downloads/<hash>.

    Network access is monkeypatched away via ``mock_request``; the test then
    checks the returned path(s), the cached content, and the sidecar metadata.
    NOTE(review): reconstructed from mangled identifiers (duplicate
    ``__snake_case`` parameters were a SyntaxError).
    """
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            downloaded_path_content = downloaded_path.read_text()
            assert downloaded_path_content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """DownloadManager.extract unpacks into <cache>/extracted/<hash> with the original content.

    NOTE(review): reconstructed from mangled identifiers (duplicate
    ``__snake_case`` parameters were a SyntaxError).
    """
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__snake_case, start=1 ):
_UpperCamelCase = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    """iter_archive yields the two jsonl members of a tar/zip fixture."""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    """iter_archive handles an archive nested inside another archive."""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir):
    """iter_files walks a directory and yields the two visible data files in order."""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 194 | 1 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a ``mm-dd-yyyy`` / ``mm/dd/yyyy`` date
    using Zeller's congruence, cross-checked against ``datetime``.

    The result is a sentence naming the weekday.  Raises ``ValueError`` for a
    malformed date and ``AssertionError`` if the congruence disagrees with
    ``datetime`` (which would indicate a bug).  The name ``zeller`` is
    confirmed by the CLI call below; the mangled original collapsed every
    local into one placeholder and fed the raw string to ``datetime.date``.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (Mon=0..Sun=6) onto Zeller's numbering (Sun=0..Sat=6).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(y, m, d)
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
__A = parser.parse_args()
zeller(args.date_input)
| 361 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # Low-pass is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # High-pass is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the mirrored denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking-EQ biquad with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shorthand terms from the Audio EQ Cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shorthand terms from the Audio EQ Cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 278 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
# Module logger (standard transformers pattern); the mangled original discarded
# it into a placeholder name.
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) to resize ``input_image`` to.

    Each dimension is scaled toward ``output_size`` and rounded to the nearest
    multiple of ``multiple``; when ``keep_aspect_ratio`` is true only the
    dimension that changes least is scaled and the other follows.  The name
    and keyword interface are confirmed by the ``resize`` method's call below;
    the mangled original declared four identically-named parameters (a
    SyntaxError).
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        # Round down if we overshot the cap, up if we fell below the floor.
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class __lowercase (_UpperCAmelCase ):
    """DPT-style image processor: optional resize (with multiple-of constraint
    and aspect-ratio preservation), rescale, normalize, and semantic-
    segmentation post-processing.

    NOTE(review): this block is an obfuscation artifact and cannot run as
    written — every method signature repeats the parameter name ``A_``
    (duplicate argument names are a SyntaxError), all five processing methods
    share the name ``UpperCamelCase__`` (each definition shadows the previous
    one), and bodies read names (``size``, ``do_resize``, ...) that are never
    bound.  Comments below describe the evident intent only.
    """

    # Key produced by preprocess() in its BatchFeature output.
    _UpperCamelCase = ["""pixel_values"""]
    def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
        """Store the default preprocessing configuration.

        Presumably the parameters are: do_resize, size, resample,
        keep_aspect_ratio, ensure_multiple_of, do_rescale, rescale_factor,
        do_normalize, image_mean, image_std — TODO confirm against the
        non-obfuscated original.
        """
        super().__init__(**A_ )
        # Default target size is 384x384 when none is given.
        __lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
        __lowerCAmelCase : Dict = get_size_dict(A_ )
        __lowerCAmelCase : Optional[Any] = do_resize
        __lowerCAmelCase : int = size
        __lowerCAmelCase : Dict = keep_aspect_ratio
        __lowerCAmelCase : List[Any] = ensure_multiple_of
        __lowerCAmelCase : Tuple = resample
        __lowerCAmelCase : Dict = do_rescale
        __lowerCAmelCase : Any = rescale_factor
        __lowerCAmelCase : List[Any] = do_normalize
        # Fall back to the standard ImageNet normalization statistics.
        __lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
        """Resize an image to ``size`` ({'height': .., 'width': ..}), delegating
        the multiple-of / aspect-ratio logic to get_resize_output_image_size."""
        __lowerCAmelCase : int = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
            A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
        return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
    def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
        """Rescale pixel values by a scale factor (typically 1/255)."""
        return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
    def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
        """Full preprocessing pipeline: validate inputs, then resize ->
        rescale -> normalize each image and wrap the batch in a BatchFeature."""
        # Per-call arguments override the defaults stored in __init__.
        __lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase : Optional[int] = size if size is not None else self.size
        __lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
        __lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __lowerCAmelCase : Tuple = resample if resample is not None else self.resample
        __lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
        __lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): precedence bug — this parses as
        # (do_resize and size is None) or (resample is None); the intended check
        # is do_resize and (size is None or resample is None).
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
        if do_resize:
            __lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
        if do_rescale:
            __lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
        if do_normalize:
            __lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        __lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
        __lowerCAmelCase : Dict = {'''pixel_values''': images}
        return BatchFeature(data=A_ , tensor_type=A_ )
    def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
        """Convert raw segmentation logits into per-image label maps, optionally
        resized (bilinear) to the given ``target_sizes``."""
        __lowerCAmelCase : Any = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            # NOTE(review): len(A_) != len(A_) compares a value with itself and
            # is always False — the intended check compares len(logits) with
            # len(target_sizes), so this guard never fires.
            if len(A_ ) != len(A_ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(A_ ):
                __lowerCAmelCase : Optional[int] = target_sizes.numpy()
            __lowerCAmelCase : List[str] = []
            for idx in range(len(A_ ) ):
                # Upsample each logit map to its target size before argmax.
                __lowerCAmelCase : Any = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
                __lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A_ )
        else:
            # No resizing requested: argmax over the class dimension directly.
            __lowerCAmelCase : Any = logits.argmax(dim=1 )
            __lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 275 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowercase (unittest.TestCase ):
    """Unit tests for the ``Vector`` and ``Matrix`` classes in ``.lib``.

    NOTE: in the previous version every test method was named
    ``UpperCamelCase__``, so each definition shadowed the one before it and
    ``unittest`` discovered no ``test_*`` methods at all — the suite silently
    ran zero tests.  The methods now carry unique ``test_`` names.
    """

    def test_component(self) -> None:
        """component() returns stored entries; an empty Vector is constructible."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """str() renders the vector as a comma-separated tuple."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """euclidean_length() is the 2-norm; zero for the zero vector."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Vector addition is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """Vector subtraction is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Scalar multiplication scales components; vector*vector is the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """zero_vector(n) has n zero components."""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """unit_basis_vector(n, i) is all zeros except a 1 at index i."""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """copy() produces an equal, independent vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """change_component(i, v) mutates a single entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """str() renders the matrix row by row between pipes."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """minor(x, y) is the determinant of the submatrix without row x / col y."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """cofactor(x, y) is the signed minor."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """determinant() of a known 3x3 matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """Matrix*vector and matrix*scalar products."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """change_component(x, y, v) mutates a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """component(x, y) reads a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # The old code passed 0.01 as assertEqual's third argument, which is
        # the failure *message*, not a tolerance; use assertAlmostEqual.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        """Matrix addition is element-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """Matrix subtraction is element-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """square_zero_matrix(n) is an n x n matrix of zeros."""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)),
        )
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 275 | 1 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer.

    The previous version was defined under the obfuscated name
    ``UpperCAmelCase__`` while its call site used ``sum_digits``, and the
    accumulator was assigned to ``_snake_case`` but read as ``digit_sum`` —
    both NameErrors at runtime.

    >>> sum_digits(1457)
    17
    """
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    The partial quotients of e are [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]:
    every third quotient (i % 3 == 0) equals 2 * i / 3, the rest are 1.

    The previous version assigned every local to ``_snake_case`` while reading
    ``pre_numerator``/``cur_numerator``/``temp`` (NameError), and called
    ``sum_digits``/``solution`` — names that did not exist in the module.

    >>> solution(10)
    17
    """

    def _digit_sum(num: int) -> int:
        # Local helper so this function is self-contained.
        total = 0
        while num > 0:
            total += num % 10
            num //= 10
        return total

    pre_numerator = 1  # numerator of the (i-2)-th convergent
    cur_numerator = 2  # numerator of the (i-1)-th convergent (a0 = 2)
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        # Standard continued-fraction recurrence: h_i = a_i * h_{i-1} + h_{i-2}.
        cur_numerator = e_cont * pre_numerator + temp
    return _digit_sum(cur_numerator)


# Backward-compatible alias for the previous (obfuscated) public name.
UpperCAmelCase__ = solution
# Print the Project Euler 65 answer when executed as a script.
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 132 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece vocabulary fixture shared by the tests below.
A_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
    """Tokenizer tests for ALBERT (slow + fast SentencePiece tokenizers).

    NOTE(review): obfuscation artifacts make this class non-functional as
    written — the five class attributes all share the name ``lowercase__``
    (only the last binding survives; they were presumably tokenizer_class,
    rust_tokenizer_class and three feature flags), every test method shares
    the name ``UpperCamelCase_`` (each shadows the previous, and none starts
    with ``test_`` so unittest discovers nothing), and several bodies read
    names that are never bound (``a_`` at construction sites, ``text``/
    ``text_a`` in the sequence-builder asserts).  Comments describe intent.
    """
    lowercase__ = AlbertTokenizer
    lowercase__ = AlbertTokenizerFast
    lowercase__ = True
    lowercase__ = True
    lowercase__ = True
    def UpperCamelCase_ ( self: str ):
        """Build a tokenizer from the SentencePiece fixture and save it to tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        _snake_case : Optional[int] = AlbertTokenizer(a_ )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCamelCase_ ( self: Optional[int], a_: int ):
        """Return an (input, expected output) text pair for round-trip tests."""
        _snake_case : Dict = """this is a test"""
        _snake_case : Optional[int] = """this is a test"""
        return input_text, output_text
    def UpperCamelCase_ ( self: str ):
        """<pad> maps to id 0 in both directions."""
        _snake_case : str = """<pad>"""
        _snake_case : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
    def UpperCamelCase_ ( self: Dict ):
        """Spot-check the vocabulary's first, second and last entries and size."""
        _snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], """<pad>""" )
        self.assertEqual(vocab_keys[1], """<unk>""" )
        self.assertEqual(vocab_keys[-1], """▁eloquent""" )
        self.assertEqual(len(a_ ), 30_000 )
    def UpperCamelCase_ ( self: Optional[int] ):
        """vocab_size matches the fixture's 30k entries."""
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000 )
    def UpperCamelCase_ ( self: List[str] ):
        """Slow and fast tokenizers agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        _snake_case : Tuple = self.get_tokenizer()
        _snake_case : List[str] = self.get_rust_tokenizer()
        _snake_case : Optional[int] = """I was born in 92000, and this is falsé."""
        _snake_case : Optional[Any] = tokenizer.tokenize(a_ )
        _snake_case : List[str] = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_, a_ )
        _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
        _snake_case : Any = rust_tokenizer.encode(a_, add_special_tokens=a_ )
        self.assertListEqual(a_, a_ )
        _snake_case : int = self.get_rust_tokenizer()
        _snake_case : Dict = tokenizer.encode(a_ )
        _snake_case : Optional[int] = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_, a_ )
    def UpperCamelCase_ ( self: Optional[Any] ):
        """Full tokenization round-trip, including unknown-token handling."""
        _snake_case : Any = AlbertTokenizer(a_, keep_accents=a_ )
        _snake_case : Any = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(a_, ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [48, 25, 21, 1_289] )
        _snake_case : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            a_, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        _snake_case : str = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(a_, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
        _snake_case : Tuple = tokenizer.convert_ids_to_tokens(a_ )
        self.assertListEqual(
            a_, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""], )
    def UpperCamelCase_ ( self: Optional[Any] ):
        """build_inputs_with_special_tokens adds [CLS]/[SEP] around sequences."""
        _snake_case : Optional[Any] = AlbertTokenizer(a_ )
        _snake_case : int = tokenizer.encode("""sequence builders""" )
        _snake_case : Optional[int] = tokenizer.encode("""multi-sequence build""" )
        _snake_case : Any = tokenizer.build_inputs_with_special_tokens(a_ )
        _snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_, a_ )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
    @slow
    def UpperCamelCase_ ( self: Dict ):
        """Integration test against the published albert-base-v2 checkpoint."""
        _snake_case : Any = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_, model_name="""albert-base-v2""", revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""", )
| 132 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the argument parser for the ``accelerate env`` command.

    The previous version named its parameter ``__UpperCAmelCase`` while the
    body read ``subparsers`` (NameError), and registered ``func=subparsers``
    instead of the command handler.

    Args:
        subparsers: optional argparse subparsers object; when given, ``env``
            is registered as a sub-command, otherwise a standalone parser is
            returned.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        # Registered as a sub-command: dispatch to env_command when selected.
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Collect environment information and print it in GitHub-issue format.

    The previous version named its parameter ``__UpperCAmelCase`` while the
    body read ``args``, and assigned every local to ``lowerCAmelCase__`` so
    ``pt_version``, ``accelerate_config`` and ``info`` were never bound.

    Args:
        args: parsed CLI namespace with a ``config_file`` attribute.

    Returns:
        dict mapping human-readable property names to their values.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main() -> int:
    """Entry point for the standalone ``accelerate env`` command.

    The previous version was the third definition named ``lowercase_`` in
    this module and called ``env_command_parser``/``env_command`` — names
    that did not exist under the obfuscation.
    """
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


# Backward-compatible alias for the previous (obfuscated) public name.
lowercase_ = main
# Exit with main()'s return code when run as a script.
if __name__ == "__main__":
    raise SystemExit(main())
| 242 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCamelCase ( a_ ):
    """Image-captioning tool wrapping the BLIP base captioning checkpoint.

    NOTE(review): obfuscation artifacts — the base class ``a_`` is undefined
    (presumably PipelineTool), the six class attributes all share the name
    ``_lowerCamelCase`` (only the last binding survives; they were presumably
    default_checkpoint, description, name, model_class, inputs, outputs), and
    the encode/forward/decode methods all share the name ``_lowerCAmelCase``
    so each definition shadows the previous one.
    """
    _lowerCamelCase :Any = "Salesforce/blip-image-captioning-base"
    _lowerCamelCase :int = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    _lowerCamelCase :List[Any] = "image_captioner"
    _lowerCamelCase :Tuple = AutoModelForVisionaSeq
    _lowerCamelCase :Dict = ["image"]
    _lowerCamelCase :str = ["text"]
    def __init__( self : Dict , *UpperCamelCase : Any , **UpperCamelCase : Any ) -> Any:
        """Require the vision extra (PIL) before initializing the tool."""
        requires_backends(self , ["""vision"""] )
        super().__init__(*UpperCamelCase , **UpperCamelCase )
    def _lowerCAmelCase ( self : Any , UpperCamelCase : "Image" ) -> Union[str, Any]:
        """Preprocess the input image into model-ready tensors."""
        return self.pre_processor(images=UpperCamelCase , return_tensors="""pt""" )
    def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> Tuple:
        """Run caption generation on the preprocessed inputs."""
        return self.model.generate(**UpperCamelCase )
    def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Tuple:
        """Decode the generated token ids into a caption string."""
        return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0].strip()
| 242 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure transformers logging: verbose, default handler, explicit format.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , ) -> Tuple:
    """Load CSV train/eval/test files and build tf.data datasets for text
    classification.

    NOTE(review): obfuscation artifact — the signature repeats the parameter
    name ``__lowerCAmelCase`` six times (a SyntaxError), and every local is
    assigned to ``SCREAMING_SNAKE_CASE__`` while the body reads ``files``,
    ``ds``, ``transformed_ds``, ``labelaid`` etc., so nothing is bound.
    Presumably the parameters are: train_file, eval_file, test_file,
    tokenizer, label_column_id, max_seq_length — TODO confirm against the
    non-obfuscated original.  Returns (train_ds, val_ds, test_ds, labelaid).
    """
    SCREAMING_SNAKE_CASE__ : List[str] = {}
    if train_file is not None:
        SCREAMING_SNAKE_CASE__ : Tuple = [train_file]
    if eval_file is not None:
        SCREAMING_SNAKE_CASE__ : List[str] = [eval_file]
    if test_file is not None:
        SCREAMING_SNAKE_CASE__ : List[Any] = [test_file]
    # Load all provided CSV files as one DatasetDict.
    SCREAMING_SNAKE_CASE__ : List[Any] = datasets.load_dataset("""csv""" , data_files=__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Optional[int] = list(ds[list(files.keys() )[0]].features.keys() )
    # Separate the label column from the feature columns.
    SCREAMING_SNAKE_CASE__ : List[str] = features_name.pop(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
    # Map each distinct label string to an integer id.
    SCREAMING_SNAKE_CASE__ : Optional[Any] = {label: i for i, label in enumerate(__lowerCAmelCase )}
    SCREAMING_SNAKE_CASE__ : int = tokenizer.model_input_names
    SCREAMING_SNAKE_CASE__ : List[Any] = {}
    # Tokenize: single-sentence task when one feature column, pair task when two.
    if len(__lowerCAmelCase ) == 1:
        for k in files.keys():
            SCREAMING_SNAKE_CASE__ : Optional[int] = ds[k].map(
                lambda __lowerCAmelCase : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" ) , batched=__lowerCAmelCase , )
    elif len(__lowerCAmelCase ) == 2:
        for k in files.keys():
            SCREAMING_SNAKE_CASE__ : Optional[Any] = ds[k].map(
                lambda __lowerCAmelCase : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , ) , batched=__lowerCAmelCase , )
    # Generator views over each split yielding (inputs-dict, int-label) pairs.
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
            SCREAMING_SNAKE_CASE__ : List[str] = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            SCREAMING_SNAKE_CASE__ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
            SCREAMING_SNAKE_CASE__ : Any = labelaid[ex[label_name]]
            yield (d, label)
    # Wrap each available split in a tf.data.Dataset with a known cardinality.
    SCREAMING_SNAKE_CASE__ : List[Any] = (
        tf.data.Dataset.from_generator(
            __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        SCREAMING_SNAKE_CASE__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    SCREAMING_SNAKE_CASE__ : Tuple = (
        tf.data.Dataset.from_generator(
            __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        SCREAMING_SNAKE_CASE__ : List[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    SCREAMING_SNAKE_CASE__ : List[Any] = (
        tf.data.Dataset.from_generator(
            __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        SCREAMING_SNAKE_CASE__ : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
# Module-level logger for the training script.
a :List[Any] = logging.getLogger(__name__)
@dataclass
class __a :
    """Data arguments for the TF text-classification example: label column,
    train/dev/test file paths, max sequence length and cache overwrite flag.

    NOTE(review): obfuscation artifact — every field is named
    ``_SCREAMING_SNAKE_CASE``, so each annotation/assignment overwrites the
    previous one and the dataclass ends up with a single field.  The
    declared types (int for file paths, etc.) are also scrambled.
    """
    _SCREAMING_SNAKE_CASE :int = field(metadata={"""help""": """Which column contains the label"""})
    _SCREAMING_SNAKE_CASE :str = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the training file"""})
    _SCREAMING_SNAKE_CASE :Optional[str] = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the development file"""})
    _SCREAMING_SNAKE_CASE :Optional[str] = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the test file"""})
    _SCREAMING_SNAKE_CASE :int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    _SCREAMING_SNAKE_CASE :bool = field(
        default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
@dataclass
class __a :
    """Model arguments: checkpoint path/identifier plus optional config,
    tokenizer, fast-tokenizer flag and cache directory.

    NOTE(review): obfuscation artifact — all fields share the name
    ``_SCREAMING_SNAKE_CASE`` (later bindings overwrite earlier ones), and
    this class shadows the previous ``__a`` dataclass defined above it.
    """
    _SCREAMING_SNAKE_CASE :str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    _SCREAMING_SNAKE_CASE :Optional[str] = field(
        default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    _SCREAMING_SNAKE_CASE :Optional[str] = field(
        default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    _SCREAMING_SNAKE_CASE :bool = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    _SCREAMING_SNAKE_CASE :Optional[str] = field(
        default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def _lowercase ( ) -> Any:
    """Train and evaluate a TF sequence-classification model from CSV data.

    NOTE(review): obfuscation artifact — every local is assigned to
    ``SCREAMING_SNAKE_CASE__`` while the body reads ``parser``,
    ``training_args``, ``tokenizer``, ``trainer`` etc., so nothing is bound
    under the names actually used.  Comments describe the evident intent.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        F'''16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # Build tf.data datasets and the label->id mapping from the CSV files.
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    SCREAMING_SNAKE_CASE__ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
    # Instantiate the model inside the distribution strategy's scope.
    with training_args.strategy.scope():
        SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
    def compute_metrics(__lowerCAmelCase ) -> Dict:
        # Accuracy over argmax predictions.
        SCREAMING_SNAKE_CASE__ : Optional[int] = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    SCREAMING_SNAKE_CASE__ : str = TFTrainer(
        model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = trainer.evaluate()
        # Persist evaluation metrics next to the model outputs.
        SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(training_args.output_dir , """eval_results.txt""" )
        with open(__lowerCAmelCase , """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key, value in result.items():
                logger.info(F''' {key} = {value}''' )
                writer.write(F'''{key} = {value}\n''' )
        results.update(__lowerCAmelCase )
    return results
if __name__ == "__main__":
    # Script entry point: run the training/evaluation workflow defined above.
    main()
| 56 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule with a polynomial warmup phase.

    For ``step < warmup_steps`` the rate ramps up as
    ``initial_learning_rate * (step / warmup_steps) ** power``; afterwards
    control is handed to ``decay_schedule_fn`` evaluated at
    ``step - warmup_steps``.

    NOTE(fix): the original declared every ``__init__`` parameter as ``_a``
    (duplicate argument names are a SyntaxError) and ``__call__`` referenced an
    undefined name ``step`` while its parameter was ``_a``; distinct names are
    restored here.
    """

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            # After warmup, defer to the wrapped decay schedule shifted by the
            # warmup length.
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        """Return the serializable configuration (Keras schedule contract)."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }

    # Backward-compat alias for the obfuscated method name.
    _a = get_config


# Backward-compat alias for the obfuscated class name.
__a = WarmUp
def _lowercase(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """Create an optimizer with a warmup + polynomial-decay LR schedule.

    NOTE(fix): the original declared every parameter as ``__lowerCAmelCase``
    (duplicate argument names are a SyntaxError) and passed both Adam betas as
    the repeated keyword ``beta_a`` (also a SyntaxError); distinct names are
    restored following the documented ``create_optimizer`` signature.

    Returns:
        (optimizer, lr_schedule) — returned separately so the LR evolution can
        be tracked independently of the optimizer.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        # Wrap the decay schedule with a warmup ramp over the first steps.
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables
    (not to the gradients), as in "Decoupled Weight Decay Regularization".

    Parameters whose names match a regex in ``include_in_weight_decay`` are
    always decayed; matches in ``exclude_from_weight_decay`` are skipped.

    NOTE(fix): the original named every parameter and every method ``_a``
    (duplicate params are a SyntaxError, and only the last method definition
    survived, so the Keras optimizer hooks were never overridden) and dropped
    the assignment targets in ``_prepare_local``/``_get_lr``. The conventional
    names and assignments are restored. The base class was the undefined name
    ``UpperCamelCase_``; it is restored to ``Adam`` (imported above).
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Create an optimizer from a config, resolving serialized WarmUp schedules."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # Cache the decay rate as a constant for this (device, dtype) pair.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        """Subtract the decoupled weight decay from `var` when eligible."""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate and cached coefficients for a variable."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Apply weight decay before the Adam update.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Return True when weight decay should be applied to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True

    # Backward-compat alias: in the obfuscated original, `_a` resolved to the
    # last-defined method.
    _a = _do_use_weight_decay


# Backward-compat alias for the obfuscated class name.
__a = AdamWeightDecay
class GradientAccumulator:
    """Accumulates gradients across multiple mini-batches.

    Call the instance with a list of gradients to add them to the running
    sums; read ``.gradients`` to retrieve them and ``.reset()`` to zero them.

    NOTE(fix): the original used ``_a`` for both property names and for
    several in-body references (NameErrors), and inherited from the undefined
    name ``UpperCamelCase_``; conventional names are restored.
    """

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulation steps performed so far (lazily created)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients; requires at least one accumulation call."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Add `gradients` (one entry per variable, possibly None) to the sums."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero the accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))

    # Backward-compat alias: in the obfuscated original, `_a` resolved to the
    # last-defined method.
    _a = reset


# Backward-compat alias for the obfuscated class name.
__a = GradientAccumulator
| 56 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_ ( ChunkPipeline ):
    """Automatic mask-generation pipeline (SAM-style).

    Chunks a grid of point prompts over (crops of) the input image, runs the
    model per batch of points, and stitches/filters the predicted masks.

    NOTE(fix): the decorator argument and base class were obfuscated to the
    undefined name ``snake_case_``, and all four framework hooks shared the
    name ``snake_case_`` (so ``preprocess``/``_forward``/``postprocess`` could
    never be found by the pipeline machinery); the conventional names are
    restored following the ChunkPipeline contract.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""" )
        requires_backends(self, """torch""" )
        if self.framework != "pt":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
        # Restrict to models registered for mask generation (the original
        # passed the kwargs dict here, which is not a model mapping).
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """Split call kwargs into (preprocess, forward, postprocess) dicts.

        NOTE(review): the routing of the threshold kwargs between the forward
        and postprocess dicts follows the upstream MaskGenerationPipeline; the
        obfuscated original did not preserve which dict each entry went to.
        """
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generate masks for `image`; see `_sanitize_parameters` for kwargs."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers = 0,
        crop_overlap_ratio = 512 / 1500,
        points_per_crop = 32,
        crop_n_points_downscale_factor = 1,
    ):
        """Embed the image once, then yield point-prompt batches (chunked)."""
        image = load_image(image)
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0, n_points, points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """Run the model on one point batch and filter the predicted masks."""
        input_boxes = model_inputs.pop("""input_boxes""" )
        is_last = model_inputs.pop("""is_last""" )
        original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["""pred_masks"""]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False )
        iou_scores = model_outputs["""iou_scores"""]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0],
            pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """Merge per-batch outputs, apply NMS across crops, assemble result."""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh )
        # Collect any remaining per-batch fields into lists.
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 58 |
'''simple docstring'''
from collections.abc import Sequence
def lowerCamelCase(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum of ``arr`` (Kadane's algorithm).

    Args:
        arr: Sequence of numbers; may be empty (returns 0).
        allow_empty_subarrays: When True the empty subarray (sum 0) is a valid
            answer, so an all-negative input yields 0.

    NOTE(fix): the original declared both parameters as ``__lowerCamelCase``
    (a SyntaxError) and collapsed ``max_sum`` and ``curr_sum`` into a single
    variable, so it returned the best sum ending at the LAST element rather
    than the overall best. Distinct names are restored.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at `num`
        # (or at the empty subarray when allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    lowercase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    # NOTE(fix): the original printed `max_subarray_sum(nums)`, but neither
    # name exists in this module — the function is `lowerCamelCase` and the
    # sample list is bound to `lowercase_`.
    print(f"""{lowerCamelCase(lowercase_) = }""")
| 58 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce `videos` into a batch of frame lists (``list[list[frame]]``).

    Accepts a single image, a single video (list of frames), or an already
    batched list of videos.

    NOTE(fix): the original renamed the parameter to ``snake_case_`` while the
    body still indexed ``videos[0]`` (a NameError), and the function itself was
    renamed away from ``make_batched`` although callers in this file use that
    name; both are restored.
    """
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        # Already batched: a list of videos, each a list of frames.
        return videos
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        # A single video -> wrap into a one-video batch.
        return [videos]
    elif is_valid_image(videos):
        # A single frame -> one-video batch with one frame.
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )


# Backward-compat alias for the obfuscated name.
lowerCAmelCase_ = make_batched
class __A ( BaseImageProcessor ):
    r"""Video image processor: resizes, center-crops, rescales (with an
    optional offset so pixels land in ``[-1, 1]``) and normalizes frames,
    batched per video.

    NOTE(fix): the base class was obfuscated to the undefined name
    ``UpperCamelCase__`` (restored to ``BaseImageProcessor``, imported above)
    and every method shared the name ``_lowercase``, so the internal
    ``self.resize``/``self.center_crop``/... calls failed; the conventional
    names are restored.
    """

    # Attribute name is part of the image-processor contract.
    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame to `size` (either shortest-edge or explicit H/W)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values; with `offset` the result is centred on 0."""
        image = image.astype(np.float32)
        if offset:
            # Shift before scaling so the output lies in [-1, 1] instead of [0, 1].
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # NOTE(fix): the original condition `do_resize and size is None or
        # resample is None` raised (by operator precedence) whenever `resample`
        # was None even with do_resize=False; parenthesized to match the
        # error message.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a video (or batch of videos) into ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size" )
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 361 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
    """Integration check for TFCamembertModel against reference activations.

    NOTE(fix): the original method was named ``_lowercase`` (so unittest never
    discovered it) and referenced the undefined name ``__a`` for both the
    input ids and the expected shape; a ``test_``-prefixed name and real local
    variables are restored.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4 ) )
| 106 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# NOTE(fix): both constants were obfuscated to the same name `__A` (the first
# was immediately shadowed) while the tests below reference
# `TOKENIZER_CHECKPOINTS`; the referenced names are restored.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        """tf.Module pairing the in-graph tokenizer with a small GPT-2 LM head,
        exposing a string->logits `serving` signature for SavedModel tests.

        NOTE(fix): the class was obfuscated to ``UpperCAmelCase__`` (and then
        shadowed by the test case below) while the tests reference
        ``ModelToSave``; similarly the serving method must be named `serving`
        since it is used as the SavedModel signature. Both names are restored.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # Build a tiny model from config only (no pretrained weights).
            # NOTE(review): the obfuscated original passed the tokenizer to
            # AutoConfig.from_pretrained; the tiny checkpoint name is intended.
            config = AutoConfig.from_pretrained("gpt2")
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text''' ),) )
        def serving(self, text):
            """Tokenize raw strings and return LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            # Mask out padding positions (id 0 after densifying the ragged ids).
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase__ ( unittest.TestCase ):
    """Checks that the in-graph `TFGPTaTokenizer` matches the reference Python
    tokenizer and survives compilation, SavedModel and config round-trips.

    NOTE(fix): every method was obfuscated to the single name ``snake_case__``
    (so unittest could discover none of them) and several locals were replaced
    by the undefined name ``a_``; `setUp`/`test_*` names and the intended
    local variables are restored.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1] ) )

    def test_output_equivalence(self):
        """TF tokenizer output must equal the reference tokenizer output."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='''tf''' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64 ) == tf_outputs_values ) )

    @slow
    def test_graph_mode(self):
        """Tokenizer must produce identical output under tf.function."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def test_saved_model(self):
        """Model+tokenizer must round-trip through tf.saved_model."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / '''saved.model'''
                tf.saved_model.save(model, save_path, signatures={'''serving_default''': model.serving} )
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs)['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def test_from_config(self):
        """Tokenizer rebuilt from its own config must behave identically."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def test_padding(self):
        """`max_length` must control the padded output width."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 226 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase__ ( __UpperCamelCase ):
    """Config tester: verifies the MobileViTV2 config exposes its
    model-specific ``width_multiplier`` attribute."""

    def snake_case__ ( self : str ):
        """Instantiate the config from the tester inputs and check the attribute."""
        cfg = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(cfg, '''width_multiplier''' ) )
class UpperCAmelCase__ :
    """Builds tiny MobileViTV2 configs/inputs and shape-checks every model head.

    Bug fixes vs. the previous revision:
    * ``__init__`` and the ``create_and_check_*`` helpers declared every
      parameter with the same name (a SyntaxError); parameter names are
      restored from how the body uses them.
    * ``get_config`` read ``self.ffn_dropout_prob`` / ``self.attn_dropout_prob``
      which were never assigned; it now reads the attributes set in ``__init__``.
    * Method names are restored to match the calls made by the test class
      below (``prepare_config_and_inputs`` etc.).
    NOTE(review): the test class's setUp references ``MobileViTVaModelTester``;
    confirm this class should carry that name upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # The last stage's channel count scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileViTV2 config from the tester's hyper-parameters."""
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Check the base model's output feature-map shape."""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Check the segmentation head's logits shape, with and without labels."""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
    """Common test-suite wiring for MobileViTV2 (base model + both heads).

    NOTE(review): every test method below is named ``snake_case__``, so later
    definitions shadow earlier ones and only the final one exists at runtime;
    the original unittest ``test_*`` names need restoring.  Several bodies also
    reference undefined names (``a_``, ``MobileViTVaModelTester``,
    ``MobileViTVaConfigTester``), and the nested ``check_hidden_states_output``
    declares duplicate parameter names (a SyntaxError) — verify against the
    upstream test file.
    """

    # Model classes exercised by the common tests (empty without torch).
    UpperCamelCase = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping used by pipeline tests.
    UpperCamelCase = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the mixin (attention outputs etc. unsupported).
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False

    def snake_case__ ( self : List[Any] ):
        """Set up the model tester and config tester (setUp in the original)."""
        __UpperCAmelCase : Dict = MobileViTVaModelTester(self )
        __UpperCAmelCase : Union[str, Any] = MobileViTVaConfigTester(self , config_class=a_ , has_text_modality=a_ )

    def snake_case__ ( self : str ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def snake_case__ ( self : List[Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def snake_case__ ( self : Union[str, Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def snake_case__ ( self : Tuple ):
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def snake_case__ ( self : Dict ):
        '''simple docstring'''
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case__ ( self : Optional[Any] ):
        '''simple docstring'''
        pass

    def snake_case__ ( self : Any ):
        """Check that forward() takes `pixel_values` as its first argument."""
        __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Any = model_class(a_ )
            __UpperCAmelCase : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase : Any = [*signature.parameters.keys()]
            __UpperCAmelCase : List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , a_ )

    def snake_case__ ( self : Union[str, Any] ):
        """Shape-check the base model via the tester."""
        __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def snake_case__ ( self : int ):
        """Check hidden-state count and per-stage spatial downsampling."""
        def check_hidden_states_output(a_ : List[Any] , a_ : List[Any] , a_ : Union[str, Any] ):
            __UpperCAmelCase : Optional[int] = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Dict = model(**self._prepare_for_class(a_ , a_ ) )
            __UpperCAmelCase : List[Any] = outputs.hidden_states
            # MobileViTV2 exposes one feature map per stage (5 stages).
            __UpperCAmelCase : str = 5
            self.assertEqual(len(a_ ) , a_ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            __UpperCAmelCase : Any = 2
            for i in range(len(a_ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Dict = True
            check_hidden_states_output(a_ , a_ , a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : Optional[Any] = True
            check_hidden_states_output(a_ , a_ , a_ )

    def snake_case__ ( self : Dict ):
        """Shape-check the image-classification head via the tester."""
        __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    def snake_case__ ( self : Any ):
        """Shape-check the semantic-segmentation head via the tester."""
        __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*a_ )

    @slow
    def snake_case__ ( self : Optional[int] ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : Optional[int] = MobileViTVaModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
def prepare_img():
    """Load the standard COCO sample image used by the slow integration tests.

    Renamed from an obfuscated one-letter name so the existing
    ``prepare_img()`` call sites in this file resolve.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow integration tests: run released MobileViTV2 checkpoints on a real
    image and compare logits against recorded reference values.

    NOTE(review): method names are obfuscated to ``snake_case__`` and shadow
    each other; the original ``test_*`` names need restoring for unittest to
    discover them.  ``require_vision``/``cached_property``/``torch_device``
    are presumably imported above this chunk — confirm.
    """

    @cached_property
    def snake_case__ ( self : Optional[int] ):
        """Image processor for the 1.0x ImageNet checkpoint (None without vision deps)."""
        return (
            MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
            if is_vision_available()
            else None
        )

    @slow
    def snake_case__ ( self : Optional[Any] ):
        """Classification head: verify logits shape and first three values."""
        __UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
            a_ )
        __UpperCAmelCase : Optional[int] = self.default_image_processor
        __UpperCAmelCase : int = prepare_img()
        __UpperCAmelCase : Union[str, Any] = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : Union[str, Any] = model(**a_ )
        # verify the logits
        __UpperCAmelCase : str = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , a_ )
        __UpperCAmelCase : Any = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )

    @slow
    def snake_case__ ( self : List[Any] ):
        """Segmentation head: verify logits shape and a 3x3x3 reference slice."""
        __UpperCAmelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        __UpperCAmelCase : int = model.to(a_ )
        __UpperCAmelCase : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        __UpperCAmelCase : List[str] = prepare_img()
        __UpperCAmelCase : Tuple = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : Dict = model(**a_ )
        __UpperCAmelCase : int = outputs.logits
        # verify the logits
        __UpperCAmelCase : Union[str, Any] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , a_ )
        __UpperCAmelCase : List[Any] = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ] , device=a_ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )

    @slow
    def snake_case__ ( self : Dict ):
        """Post-processing: check resized and native segmentation-map shapes."""
        __UpperCAmelCase : Tuple = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        __UpperCAmelCase : Optional[int] = model.to(a_ )
        __UpperCAmelCase : List[Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        __UpperCAmelCase : Optional[int] = prepare_img()
        __UpperCAmelCase : Dict = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : Union[str, Any] = model(**a_ )
        __UpperCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
        __UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(50, 60)] )
        __UpperCAmelCase : Tuple = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , a_ )
        __UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=a_ )
        __UpperCAmelCase : Optional[int] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , a_ )
| 226 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow OpenAI-GPT checkpoint into a PyTorch checkpoint.

    Args:
        openai_checkpoint_folder_path: Folder containing the original TF/numpy weights.
        openai_config_file: Optional JSON config; an empty string means default config.
        pytorch_dump_folder_path: Output folder for the PyTorch weights + config.

    Fixes vs. the previous revision: the function declared three parameters
    with the same name (a SyntaxError) and was defined under an obfuscated
    name while the __main__ block called ``convert_openai_checkpoint_to_pytorch``.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    # Fix: the parser/args objects were assigned to an obfuscated name
    # (`lowercase`) while the code below referenced `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 369 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase : Any = logging.get_logger(__name__)
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_=None ):
"""simple docstring"""
if not conversation_id:
_snake_case = uuid.uuida()
if past_user_inputs is None:
_snake_case = []
if generated_responses is None:
_snake_case = []
_snake_case = conversation_id
_snake_case = past_user_inputs
_snake_case = generated_responses
_snake_case = text
def __eq__( self , lowerCAmelCase_ ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
_snake_case = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
_snake_case = text
def lowerCamelCase ( self ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_snake_case = None
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
self.generated_responses.append(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
"""simple docstring"""
_snake_case = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_snake_case = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
    _lowerCamelCase , r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """ , )
class __UpperCAmelCase ( _lowerCamelCase ):
    """Conversational pipeline: turns Conversation objects into model replies.

    NOTE(review): this block is heavily obfuscated and several names no longer
    resolve — the decorator/base reference ``_lowerCamelCase`` (presumably
    ``PIPELINE_INIT_ARGS`` / ``Pipeline``, both imported above), every hook
    shares the name ``lowerCamelCase`` so later defs shadow earlier ones
    (the originals are presumably ``_sanitize_parameters`` / ``preprocess`` /
    ``_forward`` / ``postprocess`` / ``_legacy_parse_and_tokenize``), and many
    ``_snake_case = ...`` assignments have lost their real targets (e.g. the
    three param dicts in the sanitize hook, ``self.tokenizer.pad_token`` in
    ``__init__``).  Restore from the upstream file before relying on this code.
    """

    def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        """Forward to the Pipeline base; default pad token to EOS when unset."""
        super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
        if self.tokenizer.pad_token_id is None:
            # NOTE(review): assignment target lost — presumably
            # `self.tokenizer.pad_token = self.tokenizer.eos_token`.
            _snake_case = self.tokenizer.eos_token

    def lowerCamelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
        """Split kwargs into preprocess / forward / postprocess parameter dicts."""
        _snake_case = {}
        _snake_case = {}
        _snake_case = {}
        if min_length_for_response is not None:
            _snake_case = min_length_for_response
        if minimum_tokens is not None:
            _snake_case = minimum_tokens
        if "max_length" in generate_kwargs:
            _snake_case = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _snake_case = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(lowerCAmelCase_ )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=0 , **lowerCAmelCase_ ):
        """Run the pipeline; unwrap single-conversation results."""
        _snake_case = super().__call__(lowerCAmelCase_ , num_workers=lowerCAmelCase_ , **lowerCAmelCase_ )
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) == 1:
            return outputs[0]
        return outputs

    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=32 ):
        """Tokenize a Conversation into framework tensors (preprocess hook)."""
        if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            _snake_case = self.tokenizer._build_conversation_input_ids(lowerCAmelCase_ )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _snake_case = self._legacy_parse_and_tokenize(lowerCAmelCase_ )
        if self.framework == "pt":
            _snake_case = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            _snake_case = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=10 , **lowerCAmelCase_ ):
        """Trim to leave room for `minimum_tokens`, generate, slice new tokens (forward hook)."""
        _snake_case = generate_kwargs.get('max_length' , self.model.config.max_length )
        _snake_case = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            _snake_case = max_length - minimum_tokens
            _snake_case = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                _snake_case = model_inputs['attention_mask'][:, -trim:]
        _snake_case = model_inputs.pop('conversation' )
        _snake_case = max_length
        _snake_case = self.model.generate(**lowerCAmelCase_ , **lowerCAmelCase_ )
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start after the decoder start token.
            _snake_case = 1
        else:
            # Decoder-only outputs echo the prompt; new tokens start at index n.
            _snake_case = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=True ):
        """Decode the reply and append it to the conversation (postprocess hook)."""
        _snake_case = model_outputs['output_ids']
        _snake_case = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
        _snake_case = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(lowerCAmelCase_ )
        return conversation

    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """Fallback tokenization: concatenate each turn followed by EOS (if any)."""
        _snake_case = self.tokenizer.eos_token_id
        _snake_case = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) )
        if len(lowerCAmelCase_ ) > self.tokenizer.model_max_length:
            # Keep only the most recent tokens that fit the model window.
            _snake_case = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 160 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a__ : Optional[Any] = object()
# For specifying empty leaf dict `{}`
a__ : Dict = object()
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Tuple:
"""simple docstring"""
__A = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(snake_case__ ) - len(snake_case__ ) + 1 ):
__A = [x.match(snake_case__ ) for x, y in zip(snake_case__ , ks[i:] )]
if matches and all(snake_case__ ):
return True
return False
def _replacement_rules(rules):
    """Build a replace(key, val) function from (rule, replacement) pairs.

    The returned function yields the replacement of the first rule whose regex
    sequence matches `key` (via `_match`), or `val` unchanged.  Restored from
    an obfuscated revision whose inner function declared duplicate parameter
    names; the in-file caller (`set_partitions`) uses ``_replacement_rules``.
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Return the (key-pattern, PartitionSpec) table for GPT-style parameters.

    Kernels touching the model-parallel ('mp') axis get a 2-D spec; biases and
    layer norms are replicated (None).  Restored from an obfuscated revision:
    the undefined placeholder name in each spec is the replicated axis, i.e.
    ``None``; the in-file caller uses ``_get_partition_rules``.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Map every leaf of the parameter pytree `in_dict` to a PartitionSpec.

    Flattens the tree, applies the first matching partition rule per key, and
    asserts that no leaf was left unmatched before freezing the result.
    Restored from an obfuscated revision whose assignments lost their targets
    and whose calls passed undefined placeholder names.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel, then let the rules replace it.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if `input_str` contains every letter a-z at least once.

    Fixes vs. the previous revision: the accumulator set and the
    whitespace-stripped string both lost their assignment targets, leaving
    `frequency` undefined; the function is also renamed to match the
    `benchmark()` setup string below.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a 26-slot boolean table (single pass, no hashing).

    Fixes vs. the previous revision: both branches rebound the whole flag list
    to ``True`` (so ``all(flag)`` raised TypeError); each ASCII letter now
    marks its own slot.  Range comparisons guard against non-ASCII characters
    that `islower()`/`isupper()` would let through and index out of bounds.
    """
    flag = [False] * 26
    for char in input_str:
        if "a" <= char <= "z":
            flag[ord(char) - ord("a")] = True
        elif "A" <= char <= "Z":
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via one set comprehension over lowercased alphabetic chars.

    Renamed from an obfuscated name so the `benchmark()` setup string below
    resolves.
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations with `timeit`.

    Only meaningful when this module runs as a script: the setup string
    imports the functions from ``__main__``.  Fixes vs. the previous revision:
    the setup string lost its assignment target (the `setup=` argument passed
    an undefined name), and the function is renamed to match the call in the
    ``__main__`` block below.
    """
    from timeit import timeit

    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""", setup=setup))
    print(timeit("""is_pangram_faster()""", setup=setup))
    print(timeit("""is_pangram_fastest()""", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Run module doctests first, then the timing benchmark.
    import doctest
    doctest.testmod()
    benchmark()
| 180 | 0 |
"""simple docstring"""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __snake_case ( PipelineTool ):
    """Agent tool that transcribes audio to text with Whisper.

    Restored from an obfuscated revision: the base class referenced an
    undefined name (the intended base, ``PipelineTool``, is imported above),
    every class attribute shared one name and shadowed the previous one, and
    the three pipeline hooks shared one method name.  Attribute and hook names
    follow the ``PipelineTool`` contract (encode/forward/decode).
    """

    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['audio']
    outputs = ['text']

    def encode(self, audio):
        """Convert raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        """Run Whisper generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode generated token ids into the transcription string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 361 |
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig):
    """BuilderConfig for the Spark-backed dataset builder below."""
    # Optional explicit feature schema; inferred from the DataFrame when None.
    snake_case__ : Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """Return a zero-arg generator function yielding (key, row-dict) examples
    from `df`, visiting partitions in `partition_order`.

    Keys are "<partition_id>_<row_id>" with row ids restarting per partition.
    Renamed from an obfuscated name: the SparkExamplesIterable constructor
    below calls ``_generate_iterable_examples``.
    """
    import pyspark

    def generate_fn():
        # Tag each row with its physical partition id once, up front.
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'part_id = {partition_id}').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable that yields Spark DataFrame rows partition-by-partition.

    Restored from an obfuscated revision: the class was renamed to a
    placeholder while its own methods still construct ``SparkExamplesIterable``
    (a NameError), and the three mixin hooks shared one method name; hook
    names follow the ``_BaseExamplesIterable`` contract.
    """

    def __init__(self, df, partition_order=None):
        self.df = df
        # Default to the DataFrame's natural partition order.
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        """Return a copy of this iterable visiting partitions in shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        """Return the subset of partitions assigned to `worker_id`."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        # One shard per partition in the configured order.
        return len(self.partition_order)
class __snake_case ( datasets.DatasetBuilder):
snake_case__ : List[Any] = SparkConfig
    def __init__( self : Union[str, Any] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : int , ):
        """Cache the Spark session and DataFrame; derive the config name from
        the DataFrame's semantic hash.

        NOTE(review): all four parameters share one obfuscated name (a
        SyntaxError) and the attribute assignments lost their targets
        (presumably self._spark / self.df / self._working_dir) — restore from
        upstream before use.
        """
        import pyspark
        _lowerCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
        _lowerCamelCase : int = df
        _lowerCamelCase : Any = working_dir
        super().__init__(
            cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """On multi-node clusters, verify that `cache_dir` is on a filesystem
        (e.g. NFS) visible to both the driver and the workers by writing a
        probe file from a worker and checking it from the driver."""
        def create_cache_and_write_probe(__lowerCAmelCase : Optional[int] ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase )
            # NOTE(review): `uuid.uuida()` does not exist — presumably uuid.uuid4().
            _lowerCamelCase : Optional[int] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(__lowerCAmelCase , '''a''' )
            return [probe_file]
        # Local master: every task shares the driver's filesystem, nothing to check.
        if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            _lowerCamelCase : Optional[Any] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Dataset metadata: only the (optional) feature schema from the config."""
        return datasets.DatasetInfo(features=self.config.features )
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
        """Spark-backed datasets expose a single TRAIN split; nothing to download."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Union[str, Any] ):
        """Repartition the DataFrame so that each partition's Arrow payload
        stays under `max_shard_size`, estimated from a <=100-row sample.

        NOTE(review): most local assignments lost their targets during
        obfuscation (df_num_rows, sample_num_rows, approx_bytes_per_row,
        approx_total_size, new_num_partitions, self.df) — restore from
        upstream before use.
        """
        import pyspark
        def get_arrow_batch_size(__lowerCAmelCase : Dict ):
            # Emit one single-column record batch per input batch holding its byte size.
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
        _lowerCamelCase : Any = self.df.count()
        _lowerCamelCase : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        _lowerCamelCase : List[Any] = (
            self.df.limit(__lowerCAmelCase )
            .repartition(1 )
            .mapInArrow(__lowerCAmelCase , '''batch_bytes: long''' )
            .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        _lowerCamelCase : Dict = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            _lowerCamelCase : List[str] = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) )
            _lowerCamelCase : Optional[int] = self.df.repartition(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
        """Write the DataFrame to sharded Arrow/Parquet files from within Spark
        tasks and yield per-task (task_id, stats) tuples.

        NOTE(review): parameters share one obfuscated name (a SyntaxError) and
        many local assignments lost their targets (writer_class, fpath,
        working_fpath, embed_local_files, features, writer_batch_size,
        storage_options, shard_id, writer, table, ...) — restore from upstream
        before use.
        """
        import pyspark
        _lowerCamelCase : Optional[Any] = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        _lowerCamelCase : List[Any] = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath
        _lowerCamelCase : Dict = file_format == '''parquet'''
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        _lowerCamelCase : str = self.config.features
        _lowerCamelCase : Dict = self._writer_batch_size
        _lowerCamelCase : List[str] = self._fs.storage_options
        def write_arrow(__lowerCAmelCase : List[str] ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            _lowerCamelCase : List[str] = pyspark.TaskContext().taskAttemptId()
            _lowerCamelCase : Any = next(__lowerCAmelCase , __lowerCAmelCase )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            _lowerCamelCase : List[Any] = 0
            _lowerCamelCase : Optional[int] = writer_class(
                features=__lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
            _lowerCamelCase : int = pa.Table.from_batches([first_batch] )
            writer.write_table(__lowerCAmelCase )
            for batch in it:
                # Roll over to a new shard once the current one reaches max_shard_size.
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    _lowerCamelCase , _lowerCamelCase : Any = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
                    shard_id += 1
                    _lowerCamelCase : Optional[int] = writer_class(
                        features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
                _lowerCamelCase : Optional[int] = pa.Table.from_batches([batch] )
                writer.write_table(__lowerCAmelCase )
            if writer._num_bytes > 0:
                _lowerCamelCase , _lowerCamelCase : Optional[int] = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            if working_fpath != fpath:
                # Move shards from the scratch dir to their final location.
                # NOTE(review): `shutil` is used here but not visibly imported.
                for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ):
                    _lowerCamelCase : Optional[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) )
                    shutil.move(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = (
            self.df.mapInArrow(__lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
            .groupBy('''task_id''' )
            .agg(
                pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Tuple , ):
    """Prepare one dataset split on Spark, then rename the shard files.

    NOTE(review): every parameter is named ``__lowerCAmelCase`` -- duplicate
    parameter names are a SyntaxError -- and the body references names the
    signature never binds (``max_shard_size``, ``split_generator``,
    ``file_format``). The identifier mangling must be undone (presumably
    split_generator, file_format, max_shard_size, num_proc) before this
    method can run.
    """
    self._validate_cache_dir()
    # Normalize the shard-size limit ("500MB" style strings) to bytes.
    _lowerCamelCase : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
    self._repartition_df_if_needed(__lowerCAmelCase )
    # Local filesystems use os.path joining; remote ones always use POSIX paths.
    _lowerCamelCase : str = not is_remote_filesystem(self._fs )
    _lowerCamelCase : Tuple = os.path.join if is_local else posixpath.join
    # Shard-name template: TTTTT = task id, SSSSS = shard id, NNNNN = total shards.
    _lowerCamelCase : int = '''-TTTTT-SSSSS-of-NNNNN'''
    _lowerCamelCase : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
    _lowerCamelCase : List[Any] = path_join(self._output_dir , __lowerCAmelCase )
    # Aggregate counters collected across all Spark tasks.
    _lowerCamelCase : List[Any] = 0
    _lowerCamelCase : Any = 0
    _lowerCamelCase : str = 0
    _lowerCamelCase : int = []
    _lowerCamelCase : List[str] = []
    for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        # Garbled multi-target unpack; per the usage below the fields are
        # (num_examples, num_bytes, num_shards, shard_lengths).
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : str = content
        if num_bytes > 0:
            total_num_examples += num_examples
            total_num_bytes += num_bytes
            total_shards += num_shards
            task_id_and_num_shards.append((task_id, num_shards) )
            all_shard_lengths.extend(__lowerCAmelCase )
    # Record the split totals (targets mangled; presumably split_info fields).
    _lowerCamelCase : int = total_num_examples
    _lowerCamelCase : str = total_num_bytes
    # should rename everything at the end
    logger.debug(f'''Renaming {total_shards} shards.''' )
    if total_shards > 1:
        _lowerCamelCase : Optional[Any] = all_shard_lengths
        # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
        # pickling error due to pickling the SparkContext.
        _lowerCamelCase : str = self._fs

        # use the -SSSSS-of-NNNNN pattern
        def _rename_shard(
            __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
            # Move one task-local shard file to its global -of-NNNNN name.
            rename(
                __lowerCAmelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )

        # Build (task_id, shard_id, global_shard_id) triples for every shard.
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : Any = 0
        for i in range(len(__lowerCAmelCase ) ):
            _lowerCamelCase , _lowerCamelCase : Dict = task_id_and_num_shards[i]
            for shard_id in range(__lowerCAmelCase ):
                args.append([task_id, shard_id, global_shard_id] )
                global_shard_id += 1
        # Fan the renames out over the Spark cluster, one partition per shard.
        self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect()
    else:
        # don't use any pattern
        _lowerCamelCase : Any = 0
        _lowerCamelCase : List[str] = task_id_and_num_shards[0][0]
        self._rename(
            fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCAmelCase , '''''' ) , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "datasets.SplitGenerator" , ):
    """Return an examples iterable that streams rows from the builder's Spark DataFrame."""
    examples_iterable = SparkExamplesIterable(self.df )
    return examples_iterable
| 175 | 0 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Element-wise 1-D Gaussian PDF of ``img`` for the given ``variance``.

    Restored from the broken ``def a(__a, __a)`` form: duplicate parameter
    names are a SyntaxError, and the call sites below already use the name
    ``vec_gaussian``.
    """
    sigma = math.sqrt(variance)
    # Normalization constant of a 1-D Gaussian PDF.
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square ``kernel_size`` x ``kernel_size`` window of ``img`` centered at (x, y).

    Restored from the broken ``def a(__a, __a, __a, __a)`` form (duplicate
    parameter names); call sites below use the name ``get_slice``.
    """
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build the spatial Gaussian weight kernel for the bilateral filter.

    Each cell holds the Euclidean distance from the kernel center, which is
    then pushed through ``vec_gaussian``. Restored from the broken
    ``def a(__a, __a)`` form: the distance was assigned to a throwaway local
    instead of ``arr[i, j]``, and duplicate parameter names were a SyntaxError.
    """
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Distance of cell (i, j) from the kernel center.
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter to ``img`` (2-D, float, values expected in [0, 1]).

    Each output pixel is a weighted mean of its neighborhood; weights are the
    product of a spatial Gaussian (precomputed kernel) and an intensity
    Gaussian of the difference to the center pixel. Border pixels narrower
    than half the kernel are left at zero. Restored from the broken
    ``def a(__a, __a, __a, __a)`` form (duplicate parameter names); call
    sites use the name ``bilateral_filter``.
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity difference from the window's center pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            # Normalized weighted mean of the neighborhood.
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    """Parse CLI arguments: filename, spatial variance, intensity variance, kernel size.

    Missing arguments fall back to defaults; an even kernel size is bumped to
    the next odd value so the kernel has a center pixel. Restored from the
    broken ``def a(__a)`` form: the script below calls ``parse_args`` and all
    four results had been assigned to the same mangled name.
    """
    filename = args[1] if args[1:] else '''../image_data/lena.jpg'''
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force an odd kernel size (e.g. 4 -> 5) so a center pixel exists.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Restored from mangled output: all four results of parse_args had been
    # assigned to the same name, later references (filename, img, out) were
    # unbound, and ``np.uinta`` is not a NumPy attribute (should be np.uint8).
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    # Read as grayscale (flag 0) and show the input.
    img = cva.imread(filename, 0)
    cva.imshow('''input image''', img)

    # Normalize to [0, 1] floats for filtering.
    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    # Rescale back to 8-bit for display.
    out = out * 255
    out = np.uint8(out)
    cva.imshow('''output image''', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
    """Text-guided inpainting pipeline: CLIPSeg segments the region described
    by ``text`` into a mask, then a StableDiffusionInpaintPipeline repaints
    that region from ``prompt``.

    NOTE(review): identifier mangling collapsed every parameter to
    ``lowercase`` (duplicate parameter names are a SyntaxError) while the
    bodies still reference the original names (``scheduler``,
    ``safety_checker``, ``slice_size``, ``text``, ``image`` ...). The
    ``register_modules`` call shows the intended ``__init__`` order:
    segmentation_model, segmentation_processor, vae, text_encoder, tokenizer,
    unet, scheduler, safety_checker, feature_extractor.
    """

    def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
        super().__init__()
        # Patch legacy scheduler configs that still carry steps_offset != 1,
        # emitting a deprecation warning instead of failing.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            lowerCamelCase_ = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
            lowerCamelCase_ = dict(scheduler.config )
            lowerCamelCase_ = 1
            lowerCamelCase_ = FrozenDict(lowercase )
        # Likewise force skip_prk_steps=True on configs that predate the flag.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            lowerCamelCase_ = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
            lowerCamelCase_ = dict(scheduler.config )
            lowerCamelCase_ = True
            lowerCamelCase_ = FrozenDict(lowercase )
        if safety_checker is None:
            # Running without a safety checker is allowed but loudly discouraged.
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )

    def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
        """Enable sliced attention in the UNet to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase_ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowercase )

    def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
        """Disable attention slicing (passes None to restore full attention)."""
        self.enable_attention_slicing(lowercase )

    def SCREAMING_SNAKE_CASE_( self ) -> str:
        """Offload all sub-models to CPU via accelerate to cut GPU memory use."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowerCamelCase_ = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase , lowercase )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        """Device the pipeline actually executes on (respects accelerate hooks)."""
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
        """Segment ``text`` in ``image`` with CLIPSeg, then inpaint from ``prompt``."""
        lowerCamelCase_ = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        lowerCamelCase_ = self.segmentation_model(**lowercase )
        # Sigmoid over logits -> soft mask; converted to PIL at the input size.
        lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCamelCase_ = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , _a , )
super().__init__(*_a , **_a )
| 369 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# Resource filenames expected by the tokenizer machinery.
# NOTE(review): all five module constants had been mangled to the same name
# ``__lowerCamelCase``; the names below are restored from the references in
# the RealmTokenizerFast class further down. Two corrupted URLs were also
# fixed (``/aresolve/`` -> ``/resolve/``, ``tokenizer.jsont`` -> ``tokenizer.json``).
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub URLs for each pretrained REALM checkpoint's vocab and tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
        ),
        '''google/realm-cc-news-pretrained-encoder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
        ),
        '''google/realm-cc-news-pretrained-scorer''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
        ),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
        ),
        '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
        '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
        '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
        '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
        ),
        '''google/realm-cc-news-pretrained-encoder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
        ),
        '''google/realm-cc-news-pretrained-scorer''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
        ),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
        ),
        '''google/realm-orqa-nq-openqa''': (
            '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
        ),
        '''google/realm-orqa-nq-reader''': (
            '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
        ),
        '''google/realm-orqa-wq-openqa''': (
            '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
        ),
        '''google/realm-orqa-wq-reader''': (
            '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum model input lengths (in tokens) for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/realm-cc-news-pretrained-embedder''': 5_12,
    '''google/realm-cc-news-pretrained-encoder''': 5_12,
    '''google/realm-cc-news-pretrained-scorer''': 5_12,
    '''google/realm-cc-news-pretrained-openqa''': 5_12,
    '''google/realm-orqa-nq-openqa''': 5_12,
    '''google/realm-orqa-nq-reader''': 5_12,
    '''google/realm-orqa-wq-openqa''': 5_12,
    '''google/realm-orqa-wq-reader''': 5_12,
}

# Per-checkpoint tokenizer defaults.
PRETRAINED_INIT_CONFIGURATION = {
    '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
    '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
    '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
    '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
    '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
    '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
    '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
    '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
    """Fast (tokenizers-backed) tokenizer for REALM models.

    NOTE(review): identifier mangling left this class unrunnable -- all five
    class attributes are assigned to ``A``, all four helper methods share the
    name ``__UpperCamelCase`` (so only the last definition would survive),
    several signatures repeat the parameter name ``_A`` (duplicate parameter
    names are a SyntaxError), and bodies reference names the signatures never
    bind (``do_lower_case``, ``text``, ``batch_text_pair``, ``token_ids_a``).
    Where the first and second sequence were both collapsed to ``token_ids_a``
    the original distinction (token_ids_0 vs token_ids_1) is ambiguous; the
    original names must be restored before this class can be imported.
    """

    A = VOCAB_FILES_NAMES
    A = PRETRAINED_VOCAB_FILES_MAP
    A = PRETRAINED_INIT_CONFIGURATION
    A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A = RealmTokenizer

    def __init__( self : Any,_A : Tuple=None,_A : Union[str, Any]=None,_A : str=True,_A : Tuple="[UNK]",_A : List[str]="[SEP]",_A : List[str]="[PAD]",_A : int="[CLS]",_A : Dict="[MASK]",_A : str=True,_A : int=None,**_A : List[str],):
        """Construct the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            _A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the caller-supplied settings.
        SCREAMING_SNAKE_CASE_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase",_A ) != do_lower_case
            or normalizer_state.get("strip_accents",_A ) != strip_accents
            or normalizer_state.get("handle_chinese_chars",_A ) != tokenize_chinese_chars
        ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(_A,normalizer_state.pop("type" ) )
            SCREAMING_SNAKE_CASE_ : int = do_lower_case
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = strip_accents
            SCREAMING_SNAKE_CASE_ : int = tokenize_chinese_chars
            SCREAMING_SNAKE_CASE_ : str = normalizer_class(**_A )
        SCREAMING_SNAKE_CASE_ : Tuple = do_lower_case

    def __UpperCamelCase ( self : Tuple,_A : List[Any],**_A : Tuple ):
        """Encode a batch of candidate texts, padded to max length.

        Collects ``input_ids`` / ``attention_mask`` / ``token_type_ids`` per
        candidate and returns them stacked in a ``BatchEncoding``.
        """
        SCREAMING_SNAKE_CASE_ : str = PaddingStrategy.MAX_LENGTH
        SCREAMING_SNAKE_CASE_ : Dict = text
        SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop("text_pair",_A )
        SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop("return_tensors",_A )
        # One list per output field; each candidate appends one row.
        SCREAMING_SNAKE_CASE_ : int = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(_A ):
            if batch_text_pair is not None:
                SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_text_pair[idx]
            else:
                SCREAMING_SNAKE_CASE_ : Optional[int] = None
            SCREAMING_SNAKE_CASE_ : Optional[int] = super().__call__(_A,_A,return_tensors=_A,**_A )
            SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("input_ids" )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = encoded_candidates.get("attention_mask" )
            SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("token_type_ids" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(_A )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(_A )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(_A )
        # Drop fields that stayed empty for this batch.
        SCREAMING_SNAKE_CASE_ : Any = {key: item for key, item in output_data.items() if len(_A ) != 0}
        return BatchEncoding(_A,tensor_type=_A )

    def __UpperCamelCase ( self : Dict,_A : Optional[Any],_A : Union[str, Any]=None ):
        """Wrap one or two sequences with [CLS]/[SEP] special tokens."""
        SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __UpperCamelCase ( self : Optional[int],_A : List[int],_A : Optional[List[int]] = None ):
        """Return segment ids: 0s over the first sequence, 1s over the second."""
        SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __UpperCamelCase ( self : Any,_A : str,_A : Optional[str] = None ):
        """Save the backend vocabulary files; returns the written paths as a tuple."""
        SCREAMING_SNAKE_CASE_ : Any = self._tokenizer.model.save(_A,name=_A )
        return tuple(_A )
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    """Return True when every tensor in ``tensor_list`` has the same ``.shape``.

    An empty or single-element list is trivially True. Restored from the
    mangled ``_snake_case(lowerCAmelCase)`` form whose body referenced the
    unbound names ``tensor_list`` and ``shapes``; the test below calls it as
    ``check_same_shape``.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
    """Fast unit tests for ``StableDiffusionLatentUpscalePipeline`` on tiny models.

    NOTE(review): identifier mangling broke this test class -- every class
    attribute is assigned to ``A``, every test method is named
    ``__UpperCamelCase`` (so unittest discovery cannot find them), and ``_A``
    stands in for many distinct referenced names. Restore the original
    ``test_*`` method names before running.
    """

    A = StableDiffusionLatentUpscalePipeline
    A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    A = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    A = frozenset([] )
    A = True

    @property
    def __UpperCamelCase ( self : Dict ):
        """Deterministic 1x4x16x16 latent used as the pipeline's input image."""
        SCREAMING_SNAKE_CASE_ : Any = 1
        SCREAMING_SNAKE_CASE_ : Optional[int] = 4
        SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
        SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
        return image

    def __UpperCamelCase ( self : List[Any] ):
        """Build tiny seeded UNet/VAE/scheduler/text components for fast tests."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
            act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
        SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
        SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
        SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
        SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
        """Return canonical pipeline inputs with a device-appropriate seeded generator."""
        if str(_A ).startswith("mps" ):
            # MPS does not support per-device generators.
            SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
        else:
            SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
        SCREAMING_SNAKE_CASE_ : Tuple = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def __UpperCamelCase ( self : List[Any] ):
        """End-to-end CPU run: check output shape and a pinned pixel slice."""
        SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
        SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
        SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
        SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape,(1, 256, 256, 3) )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A,1E-3 )

    def __UpperCamelCase ( self : List[Any] ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )

    def __UpperCamelCase ( self : List[str] ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )

    def __UpperCamelCase ( self : Tuple ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def __UpperCamelCase ( self : int ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )

    def __UpperCamelCase ( self : Tuple ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )

    def __UpperCamelCase ( self : Optional[Any] ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_save_load_local(expected_max_difference=3E-3 )

    def __UpperCamelCase ( self : List[str] ):
        """Common-mixin test with a pipeline-specific tolerance."""
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def __UpperCamelCase ( self : Tuple ):
        """Run the pipeline under every supported Karras scheduler and assert
        that all outputs share the same shape; unsupported schedulers are
        skipped explicitly."""
        SCREAMING_SNAKE_CASE_ : Dict = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
        SCREAMING_SNAKE_CASE_ : Optional[int] = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
            SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
            outputs.append(_A )
        assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    """Slow integration tests against real checkpoints; requires a CUDA GPU
    and network access to the Hugging Face Hub.

    NOTE(review): method names were mangled to ``__UpperCamelCase`` -- the
    first is evidently ``tearDown`` (it calls ``super().tearDown()``) and the
    others are ``test_*`` methods; restore them for discovery to work.
    """

    def __UpperCamelCase ( self : Dict ):
        """Release Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase ( self : Optional[Any] ):
        """Generate latents with SD v1-4, upscale them, and compare against a
        stored reference image (mean absolute error tolerance)."""
        SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
        pipe.to("cuda" )
        SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
        upscaler.to("cuda" )
        SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        # output_type="latent" keeps the first pipeline's latents for upscaling.
        SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
        SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
            prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
        SCREAMING_SNAKE_CASE_ : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
        assert np.abs((expected_image - image).mean() ) < 5E-2

    def __UpperCamelCase ( self : str ):
        """Upscale a stored 512px image and compare against the stored 1024px
        reference (max absolute error tolerance)."""
        SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
        upscaler.to("cuda" )
        SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
        SCREAMING_SNAKE_CASE_ : str = upscaler(
            prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
        SCREAMING_SNAKE_CASE_ : Any = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
        assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
"""simple docstring"""
import torch
from torch import nn
class UpperCamelCase_(nn.Module):
    """Projected adaptive log-softmax over a clustered vocabulary.

    The vocabulary is partitioned by ``cutoffs`` into a frequent "shortlist"
    predicted directly by the head, plus rarer tail clusters; each tail token's
    probability is the product of its cluster's head probability and its
    probability under that cluster's (smaller) tail softmax.

    NOTE(review): the class name looks mangled — presumably
    ``ProjectedAdaptiveLogSoftmax``; confirm before renaming.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        """
        Args:
            n_token: vocabulary size.
            d_embed: embedding dimension of the output layers.
            d_proj: dimension of the incoming hidden states.
            cutoffs: ascending cluster boundaries (``n_token`` is appended).
            div_val: divide the embedding dim by ``div_val**i`` for tail ``i``.
            keep_order: if True, losses are gathered back into input order.
        """
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # The head predicts shortlist tokens plus one logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when the dimensions already match.
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Apply the (optional) projection followed by the linear output layer."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """Return per-token negative log-likelihoods when ``labels`` is given,
        otherwise the full ``(N, n_token)`` log-probability matrix.

        When ``labels`` is given, hidden/labels are shifted so tokens < n
        predict token n, and both are flattened over batch and time.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                # -100 is the conventional ignore index: masked positions keep loss 0.
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases for the head and each tail cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    # The head additionally predicts one logit per tail cluster.
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        # Without keep_order, losses are written in cluster order.
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        """Return full ``(N, n_token)`` log-probabilities for ``hidden``."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases (same layout as forward)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # BUGFIX: use the same head cluster column as forward() and
                    # broadcast it over the tail columns (`[:, -i]` neither
                    # matched forward's indexing nor broadcast correctly).
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
lowerCamelCase_ = tuple[int, int]
class Graph:
    """Undirected weighted graph with Prim's minimum-spanning-tree algorithm.

    Restored name: the original block's own call sites (``Graph(...)``,
    ``subgraph.add_edge``, ``graph.prims_algorithm``) reference these names.
    Edges are keyed by the (smaller, larger) vertex pair so each undirected
    edge has exactly one canonical key.
    """

    def __init__(self, vertices, edges):
        """
        Args:
            vertices: set of vertex ids.
            edges: mapping from (u, v) vertex-pair tuples to integer weights.
        """
        self.vertices = vertices
        # Canonicalise each key to (min, max) so (a, b) and (b, a) collide.
        self.edges = {(min(edge), max(edge)): weight for edge, weight in edges.items()}

    def add_edge(self, edge, weight):
        """Add ``edge`` with ``weight``, inserting its endpoints if missing."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        # Seed the tree with an arbitrary (smallest-id) vertex.
        subgraph = Graph({min(self.vertices)}, {})

        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than any weight in the graph.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # Only edges crossing the cut (exactly one endpoint inside).
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: return the maximum saving achieved by replacing the
    network in ``filename`` with its minimum spanning tree.

    The file is a comma-separated adjacency matrix located next to this
    module; ``-`` marks a missing edge. Restored name: the ``__main__``
    guard below calls ``solution()``.
    """
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    # Only the lower triangle is read: the matrix is symmetric.
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
# Script entry point: print the Project Euler 107 saving for the bundled data file.
if __name__ == "__main__":
print(f'{solution() = }')
| 253 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config archive map below are both
# bound to the same name `_a`, so the second assignment clobbers the logger.
# Presumably these were `logger` and `DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`
# originally — confirm and rename.
_a = logging.get_logger(__name__)
# Map from checkpoint shortcut name to its hosted config.json URL.
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase(snake_case__):
    """DistilBERT model configuration (architecture hyper-parameters).

    NOTE(review): the base class name `snake_case__` looks mangled —
    presumably `PretrainedConfig`; confirm against the imports.
    """

    # Restored attribute names: both class attributes previously shared one
    # mangled name, so the "distilbert" model type was silently clobbered.
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        """Store the architecture hyper-parameters on the config instance.

        Parameter names restored: the mangled signature reused one name for
        every parameter (a SyntaxError) and dropped the ``self.`` targets.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class __lowerCamelCase(snake_case__):
    """ONNX export configuration for DistilBERT (dynamic input axes).

    NOTE(review): the base class name `snake_case__` looks mangled —
    presumably `OnnxConfig`; confirm against the imports.
    """

    @property
    def UpperCamelCase(self):
        """Return the dynamic-axes mapping for each ONNX model input.

        Multiple-choice inputs carry an extra `choice` axis between batch
        and sequence.
        """
        # Restored: the mangled block never bound `dynamic_axis`, so the
        # OrderedDict construction below raised NameError.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 39 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# Emit INFO-level log messages while converting.
logging.set_verbosity_info()
# NOTE(review): `_a` is reused later in this script for the argparse parser
# and parsed args — presumably this was `logger` originally; confirm.
_a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping DINO checkpoint keys to HF ViT keys.

    Restored name: the conversion function calls ``create_rename_keys``.

    Args:
        config: ViT configuration; only ``num_hidden_layers`` is read.
        base_model: if True, produce keys for a bare ``ViTModel`` — the
            "vit." prefix is stripped and the classification head is omitted.
    """
    # Restored: the mangled block never bound `rename_keys`, so every append
    # below raised NameError.
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection in ``state_dict`` (in place) into
    separate query/key/value weights and biases under the HF naming scheme.

    Restored name: the conversion function calls ``read_in_q_k_v``.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the DINO classification-head tensors from ``state_dict`` in place.

    Missing keys are ignored (pop with a default). Restored name: the
    conversion function calls ``remove_classification_head_``.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place).

    Restored name: the conversion function calls ``rename_key``.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Restored name: the conversion function calls ``prepare_img``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a DINO torch-hub checkpoint into a HF ViT model, verify its
    outputs against the original, and save model + image processor.

    Restored name and locals: the ``__main__`` guard calls
    ``convert_vit_checkpoint`` while the mangled block bound every result to
    one shared name.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restored: the mangled block bound the parser to `_a` while calling
    # methods on the undefined names `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config archive map are both bound
# to `UpperCamelCase_`, so the map clobbers the logger. Presumably these were
# `logger` and `UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP` originally — confirm.
UpperCamelCase_ = logging.get_logger(__name__)
# Map from checkpoint shortcut name to its hosted config.json URL.
UpperCamelCase_ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case(SCREAMING_SNAKE_CASE_):
    """UniSpeech model configuration.

    NOTE(review): the base class name `SCREAMING_SNAKE_CASE_` looks mangled —
    presumably `PretrainedConfig`; confirm against the imports.
    """

    # Restored attribute name: the auto-class model-type identifier.
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        """Store the architecture/training hyper-parameters on the config.

        Parameter names restored: the mangled signature reused a single name
        for every parameter (a SyntaxError) and dropped the ``self.`` targets
        that later lines (e.g. ``len(self.conv_dim)``) rely on.
        """
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the conv feature extractor (product of strides).

        NOTE(review): property name restored by convention — confirm callers.
        """
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds tiny BertGeneration configs/inputs and runs shape checks.

    Restored class name: the test-suite class below instantiates
    ``BertGenerationEncoderTester(self)``. The mangled ``__init__`` reused a
    single name for every parameter (a SyntaxError) and dropped all
    ``self.``/unpacking targets.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        # Initialise to None so the return is well-defined when use_labels is False.
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, plus encoder states for cross-attention."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard-suite tests for BertGeneration encoder/decoder models.

    Restored bases: the mangled class listed the same (mangled) base three
    times, which raises TypeError at class creation; the mixin names come
    from this file's imports. Test-method names restored by the ``test_*``
    convention required for unittest discovery.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): the mangled source only shows `"bert"` being assigned;
        # presumably this overrides config.model_type — confirm.
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class snake_case ( unittest.TestCase ):
    """Integration regression test for the pretrained BertGeneration encoder."""

    @slow
    def test_inference_no_head_absolute_embedding( self) ->None:
        """Check output shape and a fixed 3x3 slice of hidden states on a canned input."""
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 10_24])
        self.assertEqual(output.shape , expected_shape)
        # Reference values recorded from a known-good run; atol guards float noise.
        expected_slice = torch.tensor(
            [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
    """Integration regression test for the pretrained BertGeneration decoder head."""

    @slow
    def test_inference_no_head_absolute_embedding( self) ->None:
        """Check logits shape (vocab size 50358) and a fixed 3x3 slice on a canned input."""
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_03_58])
        self.assertEqual(output.shape , expected_shape)
        # Reference logits recorded from a known-good run; atol guards float noise.
        expected_slice = torch.tensor(
            [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
def UpperCamelCase ( mass , velocity ):
    """Return the kinetic energy 0.5 * m * v^2 of a body.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: velocity of the body (sign is irrelevant, |v| is used).

    Raises:
        ValueError: if mass is negative.
    """
    # Original had both parameters named identically (a SyntaxError) and
    # referenced an undefined `mass`; distinct names restore the formula.
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 101 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
    """Ray actor that lazily hosts a RagRetriever so retrieval runs in a worker process.

    Method names (create_rag_retriever / init_retrieval / retrieve) are called
    remotely by the distributed retriever below, so they are part of the interface.
    """

    def __init__( self) -> None:
        # The retriever is built on the first create_rag_retriever() call,
        # not at actor construction time.
        self.initialized = False

    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index) -> None:
        """Build the wrapped RagRetriever once; subsequent calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True

    def init_retrieval( self) -> None:
        """Load/initialize the underlying index inside this actor."""
        self.retriever.index.init_index()

    def retrieve( self , question_hidden_states , n_docs):
        """Run retrieval and return (doc_ids, retrieved_doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs)
        return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
    """Distributed RagRetriever that fans retrieval out to Ray actor workers."""

    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None) -> None:
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Push a retriever instance to every worker actor up front.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval( self) -> None:
        """Initialize the index on the worker actors (or locally when not distributed)."""
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve( self , question_hidden_states , n_docs):
        """Retrieve via a randomly chosen worker (or locally) and attach doc dicts."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers( cls , retriever_name_or_path , indexed_dataset=None , **kwargs):
        # Zero-arg super() avoids name-mangling issues with the dunder class name.
        return super().get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs)

    @classmethod
    def from_pretrained( cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs):
        """Build the retriever plus its tokenizers from a pretrained RAG checkpoint."""
        config = kwargs.pop('config' , None) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
| 290 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__lowerCAmelCase : int =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
    """Deprecated alias for MobileViTImageProcessor; warns on construction."""

    def __init__( self , *args , **kwargs ):
        # Original had *x/**x share one name (a SyntaxError) and passed the
        # args tuple as the warning category; FutureWarning is the intended category.
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 364 | """simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( s: str , old: str , new: str , occurrence: int ) -> str:
    """Replace the last `occurrence` occurrences of `old` in `s` with `new` (right-to-left replace)."""
    # Original had four identically named parameters (a SyntaxError); rsplit
    # splits from the right, so joining with `new` replaces only the tail matches.
    parts = s.rsplit(old , occurrence )
    return new.join(parts )
def UpperCAmelCase__ ( state_dict ):
    """Sum all parameter values in a state dict, skipping encoder.embeddings entries.

    Returns a 0-d tensor (sum of float-cast parameter sums) used as a cheap
    parameter checksum when comparing converted checkpoints.
    """
    # Original referenced `state_dict` while the parameter had a different name.
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( state_dict ):
    """Rename dall-e Encoder keys to FLAVA image-codebook names and cast values to float32.

    Renames applied per key: group_N. -> group_N.group., res_path. -> res_path.path.,
    trailing .w -> .weight, trailing .b -> .bias.
    """
    # Original lost the result-dict assignments and returned an undefined name.
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.' , f'{group_key}.group.' )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        # Replace only the *last* ".w"/".b" (right-to-left replace via rsplit+join).
        if key.endswith(""".w""" ):
            key = ".weight".join(key.rsplit(""".w""" , 1 ))
        if key.endswith(""".b""" ):
            key = ".bias".join(key.rsplit(""".b""" , 1 ))
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    """Convert a dall-e Encoder checkpoint into a FlavaImageCodebook model.

    Args:
        checkpoint_path: local path or URL of the dall-e encoder weights.
        pytorch_dump_folder_path: where to save the converted model.
        config_path: optional FlavaImageCodebookConfig to load; defaults to a fresh config.
        save_checkpoint: save to disk when True, otherwise return the converted state dict.
    """
    # Original collapsed every local into one name, leaving undefined references.
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    # Cheap checksum: total parameter mass must survive the key renames.
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
    # Command-line entry point for the dall-e -> FLAVA codebook conversion.
    # NOTE(review): the parser is bound to `__lowerCAmelCase` but later lines read
    # `parser`/`args`, and `convert_dalle_checkpoint` is not defined under that
    # name in this module — these references look broken; confirm intended names.
    __lowerCAmelCase : Tuple =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    __lowerCAmelCase : Any =parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32 | 0 |
def A ( input_1 , input_2 ) -> int:
    """NOR gate: return 1 only when both inputs are 0, else 0."""
    # Original had two identically named parameters (a SyntaxError) and
    # compared one input to itself instead of input_1 to input_2.
    return int(input_1 == input_2 == 0 )
def A ( ) -> None:
    """Print the truth table of a NOR gate."""
    # The gate is computed inline because the sibling helper was shadowed
    # (both functions in this module share the name `A`).
    def nor(a , b ) -> int:
        return int(a == b == 0 )

    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(f'''| 0 | 0 | {nor(0 , 0 )} |''' )
    print(f'''| 0 | 1 | {nor(0 , 1 )} |''' )
    print(f'''| 1 | 0 | {nor(1 , 0 )} |''' )
    print(f'''| 1 | 1 | {nor(1 , 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    A()
| 48 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 278 | 0 |
import argparse
from collections import defaultdict
def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[int] , lowercase_: Optional[Any] , lowercase_: Union[str, Any] , lowercase_: Any ) -> int:
A__ : Optional[Any] = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(lowercase_ , """r""" ) as f:
A__ : Union[str, Any] = f.readlines()
A__ : str = f"""class {class_name}("""
A__ : Optional[Any] = f"""{4 * ' '}def {test_name}("""
A__ : Union[str, Any] = f"""{8 * ' '}{correct_line.split()[0]}"""
A__ : Optional[int] = f"""{16 * ' '}{correct_line.split()[0]}"""
A__ : int = False
A__ : str = False
A__ : Tuple = False
A__ : Optional[int] = False
A__ : Optional[Any] = 0
A__ : Dict = 0
A__ : List[str] = []
for line in lines:
if line.startswith(lowercase_ ):
A__ : Dict = True
elif in_class and line.startswith(lowercase_ ):
A__ : Optional[Any] = True
elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )):
A__ : Tuple = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
A__ : Any = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
A__ : Dict = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
A__ : List[str] = False
else:
new_lines.append(lowercase_ )
with open(lowercase_ , """w""" ) as f:
for line in new_lines:
f.write(lowercase_ )
def UpperCamelCase (correct , fail=None ):
    """Apply every `file;class;test;line` record in `correct` via overwrite_file.

    When `fail` is given, it is a file of `file::class::test` ids and only
    records matching a listed failure are rewritten.
    """
    # Original had two identically named parameters (a SyntaxError) and
    # referenced undefined locals; names restored from the body's usage.
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    # CLI entry point: read expected-result records and patch test files.
    # NOTE(review): the parser/args are bound to `A_` but later lines read
    # `parser`/`args`, and `main` is not defined under that name in this
    # module — these references look broken; confirm intended names.
    A_ : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    A_ : Optional[Any] = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 141 |
from typing import Any
def UpperCamelCase (input_list: list ) -> list[Any]:
    """Return the mode(s) of `input_list` as a sorted list ([] for empty input)."""
    if not input_list:
        return []
    # Original counted the whole list instead of each value and read an
    # undefined `y`; count each element and keep those hitting the maximum.
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts ) if count == max_count} )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 141 | 1 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _lowercase ( tmp_path ) -> str:
    """Write a small well-formed CSV under pytest's tmp_path and return its path."""
    # `tmp_path` is pytest's built-in tmp-dir fixture; the parameter name is load-bearing.
    filename = tmp_path / """file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def _lowercase ( tmp_path ) -> str:
    """Write a CSV with a malformed row (trailing comma) and return its path."""
    filename = tmp_path / """malformed_file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def _lowercase ( image_file , tmp_path ) -> str:
    """Write a CSV whose single `image` column holds the given image path; return the CSV path."""
    filename = tmp_path / """csv_with_image.csv"""
    data = textwrap.dedent(
        F'''\
        image
        {image_file}
        ''' )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def _lowercase ( tmp_path ) -> str:
    """Write a CSV with a single `label` column (good/bad values) and return its path."""
    filename = tmp_path / """csv_with_label.csv"""
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def _lowercase ( tmp_path ) -> str:
    """Write a CSV with a space-separated `int_list` column and return its path."""
    filename = tmp_path / """csv_with_int_list.csv"""
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
def _lowercase ( csv_file , malformed_csv_file , caplog ):
    """Generating tables over a malformed CSV must raise and log an ERROR naming the file."""
    # Original had three identically named parameters (a SyntaxError) and
    # undefined locals; names restored from the body's usage.
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="""Error tokenizing data""" ):
        for _ in generator:
            pass
    # NOTE(review): asserting on the malformed file's basename — confirm against upstream.
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def _lowercase ( csv_file_with_image ):
    """Casting the `image` column to the Image feature yields path/bytes dicts."""
    # The image path is read back from the CSV's second line.
    with open(csv_file_with_image , encoding="""utf-8""" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    # Image() is a feature; calling the instance yields its pyarrow storage type.
    assert pa_table.schema.field("""image""" ).type == Image()()
    generated_content = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]
def _lowercase ( csv_file_with_label ):
    """Casting the `label` column to ClassLabel maps string labels to their int ids."""
    with open(csv_file_with_label , encoding="""utf-8""" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
    generated_content = pa_table.to_pydict()["""label"""]
    # Fixed: `.straint` is not a ClassLabel method; str2int is the label->id mapping.
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def _lowercase ( csv_file_with_int_list ):
    """A converter splitting space-separated ints yields a pyarrow list column."""
    # Fixed the converter lambda: it must split its own argument `x` and
    # int-cast each token `i` (the original referenced undefined names).
    csv = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
    generated_content = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 132 |
"""simple docstring"""
import os
import sys
a :Union[str, Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a :int = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoConfig.from_pretrained (docstring attached from AutoConfig)."""
    # Each wrapper originally named *args and **kwargs identically (a SyntaxError).
    return AutoConfig.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoTokenizer.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModel.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def _lowercase ( *args , **kwargs ):
    """Forward to AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 132 | 1 |
def _A ( lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = set({"(", "[", "{"} )
lowerCAmelCase__ = set({")", "]", "}"} )
lowerCAmelCase__ = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(__a ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__a ) == 0 or (len(__a ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__a ) == 0
def _A ( ):
    """Interactive entry point: read a bracket sequence and report whether it is balanced."""
    # NOTE(review): `is_balanced` and `__a` are not defined in this module —
    # the checker above was shadowed under the name `_A`; confirm intended names.
    lowerCAmelCase__ = input("Enter sequence of brackets: " )
    if is_balanced(__a ):
        print(__a , "is balanced" )
    else:
        print(__a , "is not balanced" )


if __name__ == "__main__":
    main()
| 365 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
UpperCamelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
UpperCamelCase = F"""down_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
UpperCamelCase = F"""down_blocks.{i}.attentions.{j}."""
UpperCamelCase = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
UpperCamelCase = F"""up_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
UpperCamelCase = F"""up_blocks.{i}.attentions.{j}."""
UpperCamelCase = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
UpperCamelCase = F"""down_blocks.{i}.downsamplers.0.conv."""
UpperCamelCase = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
UpperCamelCase = F"""up_blocks.{i}.upsamplers.0."""
UpperCamelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
UpperCamelCase = 'mid_block.attentions.0.'
UpperCamelCase = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
UpperCamelCase = F"""mid_block.resnets.{j}."""
UpperCamelCase = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _A ( unet_state_dict ):
    """Map HF Diffusers UNet state-dict keys to Stable Diffusion checkpoint keys.

    Builds an HF-key -> SD-key mapping from the module-level conversion tables
    (exact renames, resnet sub-part renames, then layer-prefix renames) and
    returns the re-keyed state dict.
    """
    # Original lost the mapping-dict assignments and referenced undefined names.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
UpperCamelCase = F"""encoder.down_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
UpperCamelCase = F"""down_blocks.{i}.downsamplers.0."""
UpperCamelCase = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
UpperCamelCase = F"""up_blocks.{i}.upsamplers.0."""
UpperCamelCase = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
UpperCamelCase = F"""decoder.up_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
UpperCamelCase = F"""mid_block.resnets.{i}."""
UpperCamelCase = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def _A ( lowerCAmelCase_ : List[str] ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def _A ( vae_state_dict ):
    """Map HF Diffusers VAE state-dict keys to Stable Diffusion checkpoint keys.

    Applies the module-level VAE rename tables (plus attention-specific renames)
    and reshapes the mid-block attention q/k/v/proj_out weights to SD's 1x1-conv
    layout via reshape_weight_for_sd.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F'mid.attn_1.{weight_name}.weight' in k:
                print(F'Reshaping {k} for SD format' )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
UpperCamelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCamelCase = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCamelCase = {'q': 0, 'k': 1, 'v': 2}
def _A ( lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
lowerCAmelCase__ = k[: -len(".q_proj.weight" )]
lowerCAmelCase__ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
lowerCAmelCase__ = [None, None, None]
lowerCAmelCase__ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
lowerCAmelCase__ = k[: -len(".q_proj.bias" )]
lowerCAmelCase__ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
lowerCAmelCase__ = [None, None, None]
lowerCAmelCase__ = v
continue
lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = torch.cat(lowerCAmelCase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = torch.cat(lowerCAmelCase_ )
return new_state_dict
def _A ( lowerCAmelCase_ : Any ):
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCamelCase = load_file(unet_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
UpperCamelCase = load_file(vae_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
UpperCamelCase = load_file(text_enc_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
UpperCamelCase = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
UpperCamelCase = convert_unet_state_dict(unet_state_dict)
UpperCamelCase = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase = convert_vae_state_dict(vae_state_dict)
UpperCamelCase = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase = {'transformer.' + k: v for k, v in text_enc_dict.items()}
UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 221 | 0 |
'''simple docstring'''
import os
import sys
a : str = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a : Optional[Any] = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return AutoConfig.from_pretrained(*__UpperCAmelCase, **__UpperCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*__UpperCAmelCase, **__UpperCAmelCase )
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    # torch.hub entry point; the function name `model` is part of the hub interface.
    r"""Load a pretrained base model via `AutoModel.from_pretrained`."""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    # torch.hub entry point; name follows the hubconf convention for task heads.
    r"""Load a pretrained causal-LM model via `AutoModelForCausalLM.from_pretrained`."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    # torch.hub entry point; name follows the hubconf convention for task heads.
    r"""Load a pretrained masked-LM model via `AutoModelForMaskedLM.from_pretrained`."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    # torch.hub entry point; name follows the hubconf convention for task heads.
    r"""Load a pretrained sequence-classification model via `AutoModelForSequenceClassification.from_pretrained`."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    # torch.hub entry point; name follows the hubconf convention for task heads.
    r"""Load a pretrained question-answering model via `AutoModelForQuestionAnswering.from_pretrained`."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module-level logger and the canonical pretrained-config map.
# Fixes: the obfuscated original bound both values to the same name `a`,
# so the logger binding was lost.
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class a ( PretrainedConfig ):
    """Configuration class for Marian encoder-decoder translation models.

    Fixes two obfuscation defects: every ``__init__`` parameter was named
    ``lowercase_`` (duplicate argument names — a SyntaxError) and every
    ``self.x = x`` assignment had been rewritten to a dead local binding, so no
    configuration value was ever stored. Parameter names are restored from the
    body's right-hand sides; defaults are unchanged. The base class is the
    ``PretrainedConfig`` this module imports.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder may use its own vocabulary; default to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class a ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for Marian (mirrors the BART ONNX config).

    Fixes obfuscation defects: every method was named ``A_`` (only the last
    binding survived), parameter lists repeated the same name (SyntaxError),
    and local bindings had been collapsed. Method names are restored from the
    intra-class call sites (e.g. ``self._generate_dummy_inputs_for_encoder_and_decoder``);
    the base class is the ``OnnxSeqaSeqConfigWithPast`` this module imports.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                # With a cache the decoder only consumes the newly generated token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip the seq2seq-specific output handling for causal-lm style export.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build encoder+decoder dummy inputs (and zeroed past_key_values when enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build decoder-only dummy inputs (and zeroed past_key_values when enabled)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Tokenize a fixed-size dummy batch; -1 dims are replaced by fixed defaults."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Dispatch to the task-appropriate dummy-input generator."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Seq2seq tasks use the specialised flattening; causal-lm falls back to the
        # generic OnnxConfigWithPast behaviour.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX outputs against the PyTorch reference.
        return 1e-4
| 56 | 1 |
"""Evaluate a polynomial two ways: naive power expansion and Horner's method.

Fixes: both functions were named ``_A`` (the second shadowed the first) while
the ``__main__`` block called ``evaluate_poly`` and ``horner`` — a NameError.
"""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Return poly evaluated at x by summing c_i * x**i for each coefficient."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Return poly evaluated at x with Horner's scheme (one mul+add per coefficient)."""
    result = 0.0
    # Fold from the highest-degree coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7x^4 + 9.3x^3 + 5x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 357 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """Build a 192px SimMIM SwinConfig for the given checkpoint name.

    Restores the name called at the conversion entry point (the obfuscated
    original named every function ``_A``).

    Raises:
        ValueError: if `model_name` names neither a "base" nor a "large" variant.
    """
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    """Map one original SimMIM state-dict key to its HF Swin equivalent.

    Encoder keys get textual substitutions and a ``swin.`` prefix; decoder keys
    pass through unchanged (they belong to the masked-image-modeling head).
    """
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite an original SimMIM state dict in place into HF Swin naming.

    Fuses nothing, but splits each fused ``qkv`` tensor into separate
    query/key/value weights and biases, sized from the target model's
    attention head size. Fixes the obfuscated original, whose duplicate
    parameter names were a SyntaxError and whose result keys were lost.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            # Attention masks are recomputed by the HF model; drop them.
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # Fused qkv weight is stacked as [q; k; v] along dim 0.
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Convert an original SimMIM Swin checkpoint to HF format.

    Loads the .pth checkpoint, converts its state dict, smoke-tests a forward
    pass on one COCO image, then optionally saves locally and/or pushes to the
    hub. Fixes the obfuscated original (duplicate parameter names — a
    SyntaxError — and ``.keys()`` called on a logits tensor, which raises).
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    # Smoke check only: the forward pass completed and produced a logits tensor.
    print("Logits shape:", tuple(outputs.shape))
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    # Fixes: the obfuscated original assigned the parser and args to `lowerCamelCase`
    # while the following lines read `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds tiny BertGeneration configs/inputs and runs shape checks for the test class below.

    The class name is restored from its call site (``BertGenerationEncoderTester(self)``);
    the obfuscated original collapsed all attribute assignments to dead locals and
    duplicated every ``__init__`` parameter name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny random batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs but decoder-mode, with encoder states/mask."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        """Check cached decoding matches full-sequence decoding on a random slice."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit-test suite for BertGenerationEncoder/Decoder.

    Mixin bases are restored from this module's imports; the obfuscated
    original wrote ``(a_, a_, a_, unittest.TestCase)`` with ``a_`` undefined.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration test: encoder forward pass against known reference values."""

    @slow
    def test_inference_no_head(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration test: decoder LM-head forward pass against known reference values."""

    @slow
    def test_inference_no_head(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 344 |
"""simple docstring"""
__UpperCamelCase : Optional[Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
__UpperCamelCase : str = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
__UpperCamelCase : Dict = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
__UpperCamelCase : int = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 106 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __a ( BaseOutput ):
    """Output of the SDE-VE scheduler's prediction step.

    Fixes: the obfuscated original declared both fields under the same name
    (the second annotation silently replaced the first) and inherited from the
    undefined ``A__``; ``BaseOutput`` is what this module imports. Field names
    follow diffusers' SdeVeOutput — confirm against the scheduler's
    ``step_pred`` return sites.
    """

    # Sample at the previous timestep (x_{t-1} in the sampling loop).
    prev_sample: torch.FloatTensor
    # Mean of prev_sample over the injected noise; averaging reduces final noise.
    prev_sample_mean: torch.FloatTensor
class __a(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (Song et al., score_sde).

    The original obfuscated version was not valid Python: every method was
    named `__lowercase` (each definition shadowed the previous one) and every
    method reused one parameter name, which is a SyntaxError. Names below are
    restored to the canonical diffusers `ScoreSdeVeScheduler` interface, which
    is also what the internal calls (`self.set_sigmas`, `self.set_timesteps`,
    `self.get_adjacent_sigma`) require.
    """

    # One model call per scheduler step.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max
        # Setable values; populated by set_timesteps/set_sigmas.
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is needed for the VE SDE; return sample unchanged."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Create continuous timesteps in [1, sampling_eps] for inference."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise schedule used during sampling."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # Discrete sigmas are geometrically spaced between sigma_min and sigma_max.
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Return sigma of the previous discrete step (zero at timestep 0)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample with the reverse SDE.

        Returns an `SdeVeOutput` (or a `(prev_sample, prev_sample_mean)` tuple
        when `return_dict=False`).
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin-style correction based on the SNR config."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Noise `original_samples` with the sigma of each given timestep."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
def method_a(boundary, steps):
    """Approximate the integral of `f` over [boundary[0], boundary[1]].

    Uses the extended trapezoidal rule:
        int(f) ~= h * (f(a)/2 + f(x1) + ... + f(x_{n-1}) + f(b)/2)

    The obfuscated original declared two parameters with the same name
    (a SyntaxError); the name `method_a` is what `main()` calls.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)  # half weight for the left endpoint
    for i in x_i:
        y += h * f(i)  # full weight for interior points
    y += (h / 2.0) * f(b)  # half weight for the right endpoint
    return y
def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... below b-h.

    NOTE(review): with exact arithmetic the point b-h itself is excluded by
    the strict `<` comparison; floating-point accumulation usually includes a
    value just under b-h — confirm the intended bound before changing it.
    (The original declared three parameters with one name: a SyntaxError.)
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    """Integrand used by the trapezoidal demo: f(x) = x**2.

    The original's body referenced `x` while the parameter was named
    `__lowerCAmelCase`, so calling it raised NameError; the name `f` is what
    `method_a` invokes.
    """
    y = (x - 0) * (x - 0)
    return y
def main():
    """Integrate f over [0, 1] with 10 trapezoidal steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
    # Residue tokens fused onto this line made it a syntax error; run the demo.
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for XLM-RoBERTa-XL. The mangled original assigned
# both the dict and the model list to the same name `a_` (the list clobbered
# the dict) and then passed an undefined `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols.
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so heavy torch imports happen only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of `n` (Project Euler problem 3).

    The mangled original took parameter `a_` but operated on an undefined `n`,
    and returned `int(a_)` (the input) instead of the answer; the name
    `solution` is what the __main__ guard calls.

    Raises:
        TypeError: if `n` is not an int and not castable to int.
        ValueError: if `n` <= 0.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    # Trial division: divide out each factor fully, so `i` only lands on primes.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    # Whatever remains above 1 is itself prime and is the largest factor.
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
    # Residue tokens fused onto this line made it a syntax error.
    print(f"{solution() = }")
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase(ProcessorMixin):
    """CLIPSeg processor: wraps a ViT image processor and a CLIP tokenizer.

    The mangled original overwrote all three class attributes under one name,
    overwrote four methods under `__snake_case`, inherited from an undefined
    `_UpperCAmelCase`, and referenced `encoding`/`prompt_features`/
    `image_features` that were never bound. Names restored to the canonical
    transformers `CLIPSegProcessor` layout, which ProcessorMixin requires.
    """

    # ProcessorMixin wiring: attribute names and their backing classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Legacy fallback: honour `feature_extractor` only when no image
        # processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """Encode text and/or images and/or a visual prompt image.

        Exactly one of `text`/`visual_prompt` may be supplied; `images` can
        accompany either. Raises ValueError for invalid combinations.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase(PipelineTool):
    """Agent tool producing a binary segmentation mask from an image + label.

    The mangled original inherited from an undefined name `a`, overwrote all
    six tool attributes under `lowercase__`, overwrote encode/forward/decode
    under `__snake_case`, lost the mask-threshold assignment targets, and used
    the nonexistent `np.uinta` dtype.
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # PIL is required to build the output mask image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Prepare model inputs from the image and the text label."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg and return the raw segmentation logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and return a black/white PIL image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    """Sum a numpy array; module-level so multiprocessing can pickle it.

    The mangled original took `_lowerCAmelCase` but summed an undefined `x`,
    and its name `A_` was immediately overwritten by the next definition.
    """
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    """Return i + 1; module-level so multiprocessing can pickle it.

    The mangled original's body referenced `i` while the parameter was named
    `_lowerCAmelCase` (NameError).
    """
    return i + 1
@dataclass
class A:
    """Tiny record used by the asdict tests below.

    The name `A` and fields `x`/`y` are required by `A(x=1, y="foobar")`
    in test_asdict; the mangled version named both fields `_UpperCAmelCase`.
    """

    # Integer payload.
    x: int
    # String payload.
    y: str
class PyUtilsTest(TestCase):
    """Tests for datasets.utils.py_utils helpers.

    Reconstructed from the canonical datasets test file: the mangled version
    inherited from the undefined `__snake_case`, gave all three methods the
    same name, and collapsed every map_nested argument to `A_`.
    """

    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        # Same expectations with multiprocessing enabled.
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        # numpy leaves: map_numpy toggles whether arrays are mapped elementwise.
        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        # The attribute is swapped only inside the context manager.
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """map_nested must go single-process below parallel_min_length and cap
    the pool size at the iterable length otherwise.

    (The mangled original declared three parameters with one name, which is
    a SyntaxError; names restored from the parametrize string.)
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    """temp_seed must make RNG output reproducible inside the context only.

    Reconstructed: the mangled version inherited from the undefined
    `__snake_case`, named all three methods identically, and replaced the
    `set_tensorflow=True`/`set_pytorch=True` flags and output variables
    with the `A_` placeholder.
    """

    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    """`.data` must return the wrapped structure unchanged.

    (The mangled original compared two names that were never bound.)
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    """flatten() must collapse arbitrarily nested containers to a flat list.

    (The mangled original declared two parameters with one name: SyntaxError.)
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    """asdict must recurse into containers and reject non-dataclass roots."""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    # Dataclasses nested inside dicts and lists are converted too.
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    """Whitespace-split helper; name grounded by its use in
    test_iflatmap_unordered. The mangled original's body referenced `text`
    while the parameter was `_lowerCAmelCase` (NameError)."""
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    """Yield (timestamp, content) twice with a 2 s gap, to verify that
    iflatmap_unordered streams items as they are produced. Name grounded by
    its use in test_iflatmap_unordered; the mangled original's body used
    `content` while the parameter was `_lowerCAmelCase` (NameError)."""
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    """iflatmap_unordered must flatten all workers' results and yield each
    item as soon as it is produced.

    (The mangled original never passed `pool` into iflatmap_unordered and
    left `out` unbound.)
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline.

    Reconstructed: the mangled version named all three methods identically
    (so only one survived), used the nonexistent `jnp.bfloataa` dtype, and
    replaced every call argument with the `snake_case` placeholder. The
    `tearDown` name is grounded by the `super().tearDown()` call.
    """

    def tearDown(self):
        # Release references and collect between tests to limit memory use.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """Default scheduler: check output shape and a known pixel slice."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """Same check with the DPM-Solver multistep scheduler."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = 0
def snake_case ( self ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
__lowerCAmelCase = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
__lowerCAmelCase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("clip-base" )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def snake_case ( self ):
    # Registers a custom config/image-processor pair, verifies that
    # re-registering an existing mapping raises, and round-trips the custom
    # processor through the auto API; the `finally` block removes the
    # registrations so other tests are unaffected.
    # NOTE(review): `__a` identifiers are mangled/undefined — the intended
    # CustomConfig / CustomImageProcessor arguments must be confirmed upstream.
    try:
        AutoConfig.register("custom" , __a )
        AutoImageProcessor.register(__a , __a )
        # Trying to register something existing in the Transformers library will raise an error
        with self.assertRaises(__a ):
            AutoImageProcessor.register(__a , __a )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
            __lowerCAmelCase = Path(__a ) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
            json.dump({"model_type": "clip"} , open(__a , "w" ) )
            __lowerCAmelCase = CustomImageProcessor.from_pretrained(__a )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(__a )
                __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
                self.assertIsInstance(__a , __a )
    finally:
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
            del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self ):
    # Registers a locally-defined image processor carrying an `is_local`
    # marker, then checks precedence: the local class wins by default and when
    # trust_remote_code is disabled; the hub version (no `is_local` attribute)
    # is used only when trust_remote_code is enabled.
    # NOTE(review): `__a` / `lowerCAmelCase__` identifiers are mangled and
    # undefined in this scope — confirm the original base class and the
    # register/trust_remote_code arguments upstream.
    class _UpperCamelCase ( lowerCAmelCase__ ):
        '''simple docstring'''
        # Marker attribute used below to distinguish the local class from the hub one.
        __UpperCAmelCase : Dict =True
    try:
        AutoConfig.register("custom" , __a )
        AutoImageProcessor.register(__a , __a )
        # If remote code is not set, the default is to use local
        __lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
        self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
        self.assertTrue(image_processor.is_local )
        # If remote code is disabled, we load the local one.
        __lowerCAmelCase = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
        self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
        self.assertTrue(image_processor.is_local )
        # If remote is enabled, we load from the Hub
        __lowerCAmelCase = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
        self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
        self.assertTrue(not hasattr(__a , "is_local" ) )
    finally:
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
            del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 259 |
"""simple docstring"""
from __future__ import annotations
import time
# Type alias: a path is a list of (y, x) coordinates.
Path = list[tuple[int, int]]

# Search grid: 0 marks a free cell, 1 marks an obstacle.
# Fix: these constants were all bound to a single name `A` (each assignment
# clobbering the previous one) while the rest of the file reads `grid` and
# `delta`, which were never defined — a guaranteed NameError at runtime.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A grid cell in the search tree.

    The position is stored both as separate ``pos_x``/``pos_y`` fields and as
    a ``(y, x)`` tuple in ``pos`` for cheap equality checks; ``parent`` links
    back toward the search root so a path can be retraced.

    Fixes applied: the constructor declared five parameters all named ``__a``
    (a SyntaxError), rebound a throwaway local instead of setting attributes,
    and the class itself was named ``_UpperCamelCase`` although the rest of
    the file instantiates ``Node``.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (y, x), matching the grid indexing order
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent  # None for the root of a search
class BreadthFirstSearch:
    """Unidirectional breadth-first search over the module-level ``grid``.

    ``start`` and ``goal`` are given as ``(y, x)`` tuples. ``search`` returns
    the path from start to goal as a list of ``(y, x)`` tuples, or
    ``[self.start.pos]`` when the queue is exhausted without reaching the goal.

    Fixes applied: ``__init__`` had duplicate ``__a`` parameter names (a
    SyntaxError) and every method rebound a single throwaway local instead of
    the attributes/locals the code reads (``self.start``, ``self.node_queue``,
    ``self.reached``, ``successors``, ``pos_x``, ...); the class was named
    ``_UpperCamelCase`` although the file refers to ``BreadthFirstSearch``.
    """

    def __init__(self, start, goal):
        # start/goal come in as (y, x); Node takes (pos_x, pos_y, ...) so the
        # components are swapped here on purpose.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the found path, or ``[self.start.pos]`` on failure."""
        # NOTE(review): there is no visited set, so cells can be re-enqueued;
        # this terminates for the bundled grid but is wasteful in general.
        while self.node_queue:
            current_node = self.node_queue.pop(0)  # FIFO -> breadth-first order
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            # No route exists: signal failure with the start position only.
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]  # action is (dy, dx)
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue  # obstacle
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links from *node* back to the root, then reverse."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers grown toward each other, one step from each per loop.

    ``search`` alternates expanding the forward and backward frontiers and,
    on meeting, stitches the two partial paths together; on failure it
    returns ``[self.fwd_bfs.start.pos]``.

    Fixes applied: ``__init__`` had duplicate ``__a`` parameter names (a
    SyntaxError) and the methods rebound a single throwaway local instead of
    the names actually read (``self.fwd_bfs``, ``current_fwd_node``,
    ``successors``, ``fwd_path``, ...); the class was named
    ``_UpperCamelCase`` although the file instantiates
    ``BidirectionalBreadthFirstSearch``.
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Run both searches in lockstep until the frontiers meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each frontier chases the other's most recently popped node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            # No route exists: signal failure with the start position only.
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path and the reversed backward path at *fwd_node*/*bwd_node*."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the meeting cell, already present in fwd_path
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fix: every value below was assigned to the single name `A` while the
    # following lines read `init`, `goal`, `bfs`, `start_bfs_time`, etc. —
    # the script crashed with NameError before printing anything useful.
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    # Time plain BFS.
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    # Time bidirectional BFS on the same problem.
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 259 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.