"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[str]=30 , UpperCAmelCase__ : Optional[int]=400 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Tuple=1 / 255 , UpperCAmelCase__ : Union[str, Any]=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_a : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
_a : Optional[Any] = parent
_a : List[str] = batch_size
_a : Optional[Any] = num_channels
_a : Optional[Any] = min_resolution
_a : str = max_resolution
_a : Tuple = do_resize
_a : Any = size
_a : List[Any] = do_normalize
_a : List[Any] = image_mean
_a : Union[str, Any] = image_std
_a : int = do_rescale
_a : Optional[int] = rescale_factor
_a : List[Any] = do_pad
def _lowercase ( self : Optional[Any] ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=False ) -> int:
if not batched:
_a : Tuple = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
_a , _a : Union[str, Any] = image.size
else:
_a , _a : Tuple = image.shape[1], image.shape[2]
if w < h:
_a : Any = int(self.size["""shortest_edge"""] * h / w )
_a : Optional[int] = self.size["""shortest_edge"""]
elif w > h:
_a : Optional[Any] = self.size["""shortest_edge"""]
_a : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
_a : List[Any] = self.size["""shortest_edge"""]
_a : str = self.size["""shortest_edge"""]
else:
_a : int = []
for image in image_inputs:
_a , _a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_a : Dict = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
_a : Optional[int] = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : int = YolosImageProcessor if is_vision_available() else None
def _lowercase ( self : Dict ) -> Union[str, Any]:
_a : int = YolosImageProcessingTester(self )
@property
def _lowercase ( self : Any ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """size""" ) )
def _lowercase ( self : Tuple ) -> List[Any]:
_a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
_a : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def _lowercase ( self : int ) -> Optional[int]:
pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
# Initialize image_processing
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
_a : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a , _a : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
_a : List[str] = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Optional[Any] ) -> Tuple:
# Initialize image_processing
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
_a : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a : int = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
_a , _a : Tuple = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : List[str] ) -> Dict:
# Initialize image_processing
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
_a : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : Any = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a : str = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
_a , _a : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processings
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
_a : Any = self.image_processing_class(do_resize=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ , do_rescale=UpperCAmelCase__ )
# create random PyTorch tensors
_a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_a : Optional[Any] = image_processing_a.pad(UpperCAmelCase__ , return_tensors="""pt""" )
_a : str = image_processing_a(UpperCAmelCase__ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def _lowercase ( self : Optional[Any] ) -> Tuple:
# prepare image and target
_a : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_a : List[str] = json.loads(f.read() )
_a : Optional[int] = {"""image_id""": 39769, """annotations""": target}
# encode them
_a : Any = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
_a : List[Any] = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
_a : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase__ )
_a : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
_a : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase__ ) )
# verify boxes
_a : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase__ )
_a : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
_a : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase__ ) )
# verify is_crowd
_a : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase__ ) )
# verify class_labels
_a : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase__ ) )
# verify orig_size
_a : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase__ ) )
# verify size
_a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase__ ) )
@slow
def _lowercase ( self : Union[str, Any] ) -> Dict:
# prepare image, target and masks_path
_a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_a : Optional[Any] = json.loads(f.read() )
_a : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
_a : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_a : Dict = YolosImageProcessor(format="""coco_panoptic""" )
_a : Union[str, Any] = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
_a : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase__ )
_a : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
_a : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase__ ) )
# verify boxes
_a : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase__ )
_a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
_a : str = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase__ ) )
# verify is_crowd
_a : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase__ ) )
# verify class_labels
_a : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase__ ) )
# verify masks
_a : int = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase__ )
# verify orig_size
_a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase__ ) )
# verify size
_a : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase__ ) )
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> List[str]:
_a : Any = """laion/clap-htsat-unfused"""
_a : Union[str, Any] = tempfile.mkdtemp()
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict:
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : List[str] = self.get_tokenizer()
_a : Any = self.get_feature_extractor()
_a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
_a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> Optional[int]:
_a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
_a : Optional[int] = self.get_feature_extractor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = floats_list((3, 1000) )
_a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : Tuple ) -> Optional[int]:
_a : List[str] = self.get_feature_extractor()
_a : Any = self.get_tokenizer()
_a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Optional[int] = """This is a test string"""
_a : Tuple = processor(text=UpperCAmelCase__ )
_a : int = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : List[Any] ) -> Any:
_a : str = self.get_feature_extractor()
_a : List[str] = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : Dict = processor.batch_decode(UpperCAmelCase__ )
_a : Any = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
_a : str = self.get_feature_extractor()
_a : Optional[Any] = self.get_tokenizer()
_a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
"""simple docstring"""
import string
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Any = ''''''
for i in sequence:
lowerCAmelCase : str = ord(_snake_case )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def _snake_case ( _snake_case : str ):
lowerCAmelCase : int = string.ascii_letters
lowerCAmelCase : Dict = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_snake_case )] if c in letters else c for c in sequence )
def _snake_case ( ):
from timeit import timeit
print('''Running performance benchmarks...''' )
lowerCAmelCase : Union[str, Any] = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=_snake_case )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=_snake_case )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
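    # Added illustrative check (not in the original file): atbash is an involution,
    # so applying it twice returns the original text, and both implementations agree.
    sample = "Hello, World!"
    assert atbash(atbash(sample)) == sample
    assert atbash_slow(sample) == atbash(sample)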
"""simple docstring"""
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(_snake_case )
else:
lowerCAmelCase : str = sylvester(number - 1 )
lowerCAmelCase : Optional[Any] = num - 1
lowerCAmelCase : Optional[Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
def count_inversions_bf(arr) -> int:
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort based divide and conquer in O(n log n).

    Returns the sorted array together with the number of inversions.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    p, inversions_p = count_inversions_recursive(p)
    q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(p, q)

    num_inversions = inversions_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays and count the inversions that cross between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
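    # Added illustrative check (not in the original file): [3, 2, 1] has exactly
    # 3 inversions, namely (3, 2), (3, 1) and (2, 1); both methods must agree.
    assert count_inversions_bf([3, 2, 1]) == count_inversions_recursive([3, 2, 1])[1] == 3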
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()

        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()

        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)

"""Convert Table Transformer checkpoints to the HuggingFace Transformers format."""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model weights into our Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
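
# Example invocation (added for illustration; the script filename and output path are
# hypothetical, while the URL is this script's default checkpoint from the choices above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection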
"""Tokenization classes for CPM-Ant."""

import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first split of `token` into vocabulary pieces."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer based on jieba word segmentation followed by WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sentinel tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))

"""simple docstring"""
def lowercase ( A_ , A_ )-> int:
'''simple docstring'''
return number | (1 << position)
def lowercase ( A_ , A_ )-> int:
'''simple docstring'''
return number & ~(1 << position)
def lowercase ( A_ , A_ )-> int:
'''simple docstring'''
return number ^ (1 << position)
def lowercase ( A_ , A_ )-> bool:
'''simple docstring'''
return ((number >> position) & 1) == 1
def lowercase ( A_ , A_ )-> int:
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
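    # Added illustrative checks (not in the original file): 0b1001 with bit 1 set
    # becomes 0b1011, clearing it restores 0b1001, and bit 3 of 0b1001 reads as 1.
    assert set_bit(0b1001, 1) == 0b1011
    assert clear_bit(0b1011, 1) == 0b1001
    assert flip_bit(0b1001, 0) == 0b1000
    assert is_bit_set(0b1001, 3) is True
    assert get_bit(0b1001, 1) == 0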
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find the first instance where a non-binary file is opened without an explicit UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        """Find the first instance where a python script file contains a `print` statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
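

# Quick illustration of the encoding regex above (not part of the original test
# suite): the pattern only matches `open(...)` calls that mention neither an
# encoding nor a binary/write mode keyword anywhere after the call site.
if __name__ == "__main__":
    _regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert _regexp.search('with open("data.txt") as f:') is not None
    assert _regexp.search('with open("data.txt", encoding="utf-8") as f:') is None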
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string into camelCase (or PascalCase if indicated).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(target_tokens, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, input_sentences)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """The test is based on the `xnli` dataset (all languages) to cover a wide range of scripts."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
"""simple docstring"""
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        """Remove the pointers to and from `node`, effectively unlinking it."""
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
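

# A short usage sketch of the classes above (illustrative only): values are
# appended with `insert`, membership and traversal use the standard protocols,
# and `delete_value` unlinks a node found by value.
def _demo_linked_list() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == "1 2 3"
    assert 2 in linked_list
    linked_list.delete_value(2)
    assert list(linked_list) == [1, 3]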
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the dataset as JSON to a binary file handle; the caller is responsible for opening and closing it."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
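

# Usage sketch (illustrative, not part of the original module): round-trip a
# small in-memory Dataset through the writer and reader defined above. The
# helper name and the temporary-directory handling are assumptions for the demo.
def _demo_json_round_trip(tmp_dir: str) -> None:
    demo_dataset = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    json_path = os.path.join(tmp_dir, "demo.jsonl")
    JsonDatasetWriter(demo_dataset, json_path).write()  # one JSON object per line
    reloaded = JsonDatasetReader(json_path).read()  # map-style Dataset, split defaults to "train"
    assert reloaded.num_rows == demo_dataset.num_rows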
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between `_import_structure` objects and `TYPE_CHECKING` objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
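

# Usage note (assuming the usual repository layout): run this script from the
# repo root, e.g. `python utils/check_inits.py`. It exits normally when every
# `__init__.py` is consistent and raises a ValueError describing the offending
# files otherwise.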
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # seed the generator so the synthesized waveform is reproducible
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # same check with the input passed as a keyword argument
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
def __a ( self : List[Any] , snake_case__ : Dict=False ):
'''simple docstring'''
def _inputs_have_equal_length(snake_case__ : Optional[int] ):
UpperCAmelCase__ : Dict = len(input[0] )
for input_slice in input[1:]:
if len(snake_case__ ) != length:
return False
return True
def _inputs_are_equal(snake_case__ : Optional[int] , snake_case__ : str ):
if len(snake_case__ ) != len(snake_case__ ):
return False
for input_slice_a, input_slice_a in zip(snake_case__ , snake_case__ ):
if not np.allclose(np.asarray(snake_case__ ) , np.asarray(snake_case__ ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case__ )
UpperCAmelCase__ : Tuple = feat_extract.model_input_names[0]
UpperCAmelCase__ : Tuple = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ : List[Any] = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=snake_case__ )
UpperCAmelCase__ : Any = input_a[input_name]
UpperCAmelCase__ : Dict = feat_extract.pad(snake_case__ , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case__ ) )
self.assertFalse(_inputs_have_equal_length(snake_case__ ) )
# truncate to smallest with np
UpperCAmelCase__ : Tuple = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=snake_case__ , )
UpperCAmelCase__ : Dict = input_a[input_name]
UpperCAmelCase__ : Dict = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCAmelCase__ : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case__ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case__ ) )
# truncate to middle
UpperCAmelCase__ : Union[str, Any] = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=snake_case__ , return_tensors="np" , )
UpperCAmelCase__ : int = input_a[input_name]
UpperCAmelCase__ : Dict = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=snake_case__ )
UpperCAmelCase__ : int = input_a[input_name]
UpperCAmelCase__ : List[Any] = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCAmelCase__ : Optional[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case__ ) )
self.assertTrue(_inputs_have_equal_length(snake_case__ ) )
self.assertTrue(_inputs_are_equal(snake_case__ , snake_case__ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case__ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_2 = input_2[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
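# Illustrative sketch (ours, not part of the original test suite): the `pad_to_multiple_of`
# rounding rule asserted above, isolated as a tiny helper. The name and asserts are hypothetical.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    # Round `length` up to the next multiple of `multiple`.
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple


assert _round_up_to_multiple(24, 12) == 24
assert _round_up_to_multiple(25, 12) == 36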
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
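# Illustrative usage (ours, not in the original module): extra telemetry fields can be
# passed as a dict or appended as a raw string.
#
#   http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#   -> "diffusers/<version>; python/<py>; session_id/<hex>; ...; pipeline_class/StableDiffusionPipeline"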
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """
    Extracts the commit hash from a resolved filename toward a cache file.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
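# Example (ours): for a cached path like ".../snapshots/<commit>/unet/config.json" the
# captured group is the commit hash; anything that does not match REGEX_COMMIT_HASH
# (e.g. a branch name) yields None.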
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
        trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
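# Example (ours): the variant is spliced in just before the file extension, e.g.
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"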
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking whether we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
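# Minimal usage sketch (ours, not part of the module): the adapter lets a log call be
# restricted to the main process, or emitted once per process in rank order.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()                      # initializes PartialState
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("main process only")                 # main_process_only defaults to True
#   logger.info("all ranks", main_process_only=False, in_order=True)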
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
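# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin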
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
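# Example launch (illustrative; the script name and paths are placeholders, and task
# names come from `utils_multiple_choice.processors`, e.g. "swag"):
#   python run_multiple_choice.py \
#       --task_name swag --model_name_or_path bert-base-cased \
#       --data_dir ./data/swag --output_dir ./out \
#       --do_train --do_eval --max_seq_length 128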
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
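# Minimal usage sketch (ours, not part of the module): listing a dataset repo's files
# through fsspec. `repo_info` must be a `DatasetInfo` for `open()` to work.
#
#   from huggingface_hub import HfApi
#   repo_info = HfApi().dataset_info("glue")
#   fs = HfFileSystem(repo_info=repo_info, token=None)
#   print(fs.ls(""))                       # top-level entries of the repo
#   with fs.open("dataset_infos.json") as f:
#       data = f.read()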
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
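# (Each worker keeps taking ordinary optimizer steps locally; parameters are
# averaged across workers once every `local_sgd_steps` batches, which cuts
# communication at the cost of slightly stale parameters.)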
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Union[str, Any]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ = 8
else:
UpperCAmelCase_ = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase_ = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
UpperCAmelCase_ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> Any:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
UpperCAmelCase_ = 2
# New Code #
UpperCAmelCase_ = int(args.gradient_accumulation_steps )
UpperCAmelCase_ = int(args.local_sgd_steps )
# Initialize accelerator
UpperCAmelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ = config['''lr''']
UpperCAmelCase_ = int(config['''num_epochs'''] )
UpperCAmelCase_ = int(config['''seed'''] )
UpperCAmelCase_ = int(config['''batch_size'''] )
UpperCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
UpperCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCamelCase , model=__UpperCamelCase , local_sgd_steps=__UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also do not currently support TPUs, and advise against using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
UpperCAmelCase_ = model(**__UpperCamelCase )
UpperCAmelCase_ = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
                    # LocalSGD-specific line: count this step and average parameters across workers once every local_sgd_steps steps
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ = model(**__UpperCamelCase )
UpperCAmelCase_ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
UpperCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
UpperCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''' , type=__UpperCamelCase , default=1 , help='''The number of minibatches to be run before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=__UpperCamelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 177
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 'data2vec-text'
def __init__( self : Optional[Any] , __snake_case : Optional[int]=3_05_22 , __snake_case : List[str]=7_68 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Union[str, Any]=30_72 , __snake_case : List[Any]="gelu" , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Tuple=5_12 , __snake_case : str=2 , __snake_case : str=0.02 , __snake_case : List[Any]=1E-12 , __snake_case : Any=1 , __snake_case : List[Any]=0 , __snake_case : Dict=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Any=None , **__snake_case : List[Any] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a ( _A ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self : str ):
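        # Dynamic axes let the exported ONNX graph accept variable batch and
        # sequence lengths (and a variable number of choices for multiple-choice).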
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 177
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
for char in word:
__lowerCamelCase = ord(UpperCamelCase__ )
if not _is_chinese_char(UpperCamelCase__ ):
return 0
return 1
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = set()
for token in tokens:
__lowerCamelCase = len(UpperCamelCase__ ) > 1 and is_chinese(UpperCamelCase__ )
if chinese_word:
word_set.add(UpperCamelCase__ )
__lowerCamelCase = list(UpperCamelCase__ )
return word_list
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : set() ) -> str:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__lowerCamelCase = max([len(UpperCamelCase__ ) for w in chinese_word_set] )
__lowerCamelCase = bert_tokens
__lowerCamelCase , __lowerCamelCase = 0, len(UpperCamelCase__ )
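    # Greedy longest-match scan: whenever a run of tokens starting at `start`
    # spells a word from the LTP segmentation, its non-initial characters get the
    # '##' prefix so BERT treats them as subwords of one whole word.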
while start < end:
__lowerCamelCase = True
if is_chinese(bert_word[start] ):
__lowerCamelCase = min(end - start , UpperCamelCase__ )
for i in range(UpperCamelCase__ , 1 , -1 ):
__lowerCamelCase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowerCamelCase = '##' + bert_word[j]
__lowerCamelCase = start + i
__lowerCamelCase = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : LTP , UpperCamelCase__ : BertTokenizer ) -> int:
"""simple docstring"""
__lowerCamelCase = []
for i in range(0 , len(UpperCamelCase__ ) , 100 ):
__lowerCamelCase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
__lowerCamelCase = [get_chinese_word(UpperCamelCase__ ) for r in res]
ltp_res.extend(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = []
for i in range(0 , len(UpperCamelCase__ ) , 100 ):
__lowerCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = []
for input_ids, chinese_word in zip(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCamelCase = []
for id in input_ids:
__lowerCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase__ )
input_tokens.append(UpperCamelCase__ )
__lowerCamelCase = add_sub_symbol(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCamelCase__ ):
if token[:2] == "##":
__lowerCamelCase = token[2:]
# save chinese tokens' pos
if len(UpperCamelCase__ ) == 1 and _is_chinese_char(ord(UpperCamelCase__ ) ):
ref_id.append(UpperCamelCase__ )
ref_ids.append(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
return ref_ids
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = f.readlines()
__lowerCamelCase = [line.strip() for line in data if len(UpperCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__lowerCamelCase = LTP(args.ltp ) # faster in GPU device
__lowerCamelCase = BertTokenizer.from_pretrained(args.bert )
__lowerCamelCase = prepare_ref(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
__lowerCamelCase = [json.dumps(UpperCamelCase__ ) + '\n' for ref in ref_ids]
f.writelines(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__A = parser.parse_args()
main(args)
| 90
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ : List[Any] = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 161
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a_ : Optional[int] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''test-model-flax''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase, repo_id='''test-model-flax''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase, repo_id='''valid_org/test-model-flax-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def a_ ( __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =True
lowerCamelCase_ =flatten_dict(modela.params )
lowerCamelCase_ =flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
lowerCamelCase_ =False
return models_are_equal
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ), max_shard_size='''10KB''' )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
| 368
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
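            # Metropolis-style acceptance: a worsening move (change <= 0 here) is
            # taken with probability e**(change / current_temp), so random
            # exploration fades as the temperature decreases.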
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1_024 , _lowerCamelCase=1_024 , _lowerCamelCase=False , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="train" , **_lowerCamelCase )
_lowerCAmelCase : List[Any] = tok.pad_token_id
def get_lens(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = tqdm(
DataLoader(_lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=_lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_lowerCAmelCase : str = []
for batch in dl:
_lowerCAmelCase : List[Any] = batch["input_ids"].ne(_lowerCamelCase ).sum(1 ).tolist()
_lowerCAmelCase : Union[str, Any] = batch["labels"].ne(_lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowerCamelCase , _lowerCamelCase ):
max_lens.append(max(_lowerCamelCase , _lowerCamelCase ) )
else:
max_lens.extend(_lowerCamelCase )
return max_lens
_lowerCAmelCase : Optional[int] = get_lens(_lowerCamelCase )
_lowerCAmelCase : Any = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="val" , **_lowerCamelCase )
_lowerCAmelCase : str = get_lens(_lowerCamelCase )
pickle_save(_lowerCamelCase , train_ds.len_file )
pickle_save(_lowerCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 36
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
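    # Alice's and Bob's random measurement bases agree for about half of the
    # qubits; only those positions produce correlated bits and survive the sift.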
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( __UpperCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE : Any = """LayoutLMv2ImageProcessor"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__(self : Union[str, Any] , snake_case_ : Dict=None , snake_case_ : Optional[Any]=None , **snake_case_ : Union[str, Any] ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case_ , )
__a : List[str] = kwargs.pop('''feature_extractor''' )
__a : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(snake_case_ , snake_case_ )
def __call__(self : Tuple , snake_case_ : List[str] , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , snake_case_ : Union[List[List[int]], List[List[List[int]]]] = None , snake_case_ : Optional[Union[List[int], List[List[int]]]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : Any , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__a : List[Any] = self.image_processor(images=snake_case_ , return_tensors=snake_case_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case_ , snake_case_ ):
__a : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a : Any = features["""words"""]
__a : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel values
__a : List[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a : Optional[Any] = self.get_overflowing_images(snake_case_ , encoded_inputs['''overflow_to_sample_mapping'''] )
__a : List[str] = images
return encoded_inputs
def lowerCAmelCase (self : Tuple , snake_case_ : List[str] , snake_case_ : str ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f" {len(snake_case_ )} and {len(snake_case_ )}" )
return images_with_overflow
def lowerCAmelCase (self : List[Any] , *snake_case_ : List[str] , **snake_case_ : str ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Any , *snake_case_ : Union[str, Any] , **snake_case_ : Dict ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase (self : str ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCAmelCase (self : Dict ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case_ , )
return self.image_processor_class
@property
def lowerCAmelCase (self : List[str] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case_ , )
return self.image_processor
| 356
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase__ :
_SCREAMING_SNAKE_CASE : CommonSchedulerState
# setable values
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : Optional[int] = None
@classmethod
def lowerCAmelCase (cls : int , snake_case_ : CommonSchedulerState , snake_case_ : jnp.ndarray , snake_case_ : jnp.ndarray ):
return cls(common=snake_case_ , init_noise_sigma=snake_case_ , timesteps=snake_case_ )
@dataclass
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : DDPMSchedulerState
class UpperCamelCase__ ( __lowercase ,__lowercase ):
_SCREAMING_SNAKE_CASE : str = [e.name for e in FlaxKarrasDiffusionSchedulers]
_SCREAMING_SNAKE_CASE : jnp.dtype
@property
def lowerCAmelCase (self : Optional[Any] ):
return True
@register_to_config
def __init__(self : Any , snake_case_ : int = 1_0_0_0 , snake_case_ : float = 0.0001 , snake_case_ : float = 0.02 , snake_case_ : str = "linear" , snake_case_ : Optional[jnp.ndarray] = None , snake_case_ : str = "fixed_small" , snake_case_ : bool = True , snake_case_ : str = "epsilon" , snake_case_ : jnp.dtype = jnp.floataa , ):
__a : str = dtype
def lowerCAmelCase (self : Any , snake_case_ : Optional[CommonSchedulerState] = None ):
if common is None:
__a : Optional[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__a : int = jnp.array(1.0 , dtype=self.dtype )
__a : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case_ , init_noise_sigma=snake_case_ , timesteps=snake_case_ , )
def lowerCAmelCase (self : Dict , snake_case_ : DDPMSchedulerState , snake_case_ : jnp.ndarray , snake_case_ : Optional[int] = None ):
return sample
def lowerCAmelCase (self : List[Any] , snake_case_ : DDPMSchedulerState , snake_case_ : int , snake_case_ : Tuple = () ):
__a : Tuple = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__a : Any = (jnp.arange(0 , snake_case_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case_ , timesteps=snake_case_ , )
def lowerCAmelCase (self : List[Any] , snake_case_ : DDPMSchedulerState , snake_case_ : Optional[Any] , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=None ):
__a : Optional[Any] = state.common.alphas_cumprod[t]
__a : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
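        # i.e. the posterior variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t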
__a : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__a : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__a : Optional[Any] = jnp.clip(snake_case_ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__a : int = jnp.log(jnp.clip(snake_case_ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
__a : List[str] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__a : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__a : Any = variance
__a : Dict = state.common.betas[t]
__a : Any = (predicted_variance + 1) / 2
__a : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase (self : Any , snake_case_ : DDPMSchedulerState , snake_case_ : jnp.ndarray , snake_case_ : int , snake_case_ : jnp.ndarray , snake_case_ : Optional[jax.random.KeyArray] = None , snake_case_ : bool = True , ):
__a : int = timestep
if key is None:
__a : Any = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__a , __a : List[str] = jnp.split(snake_case_ , sample.shape[1] , axis=1 )
else:
__a : int = None
# 1. compute alphas, betas
__a : Optional[int] = state.common.alphas_cumprod[t]
__a : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__a : Optional[int] = 1 - alpha_prod_t
__a : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__a : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__a : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
__a : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__a : Dict = jnp.clip(snake_case_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : str = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__a : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__a : Optional[int] = jax.random.split(snake_case_ , num=1 )
__a : Union[str, Any] = jax.random.normal(snake_case_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case_ , snake_case_ , predicted_variance=snake_case_ ) ** 0.5) * noise
__a : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__a : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case_ , state=snake_case_ )
def lowerCAmelCase (self : List[str] , snake_case_ : DDPMSchedulerState , snake_case_ : jnp.ndarray , snake_case_ : jnp.ndarray , snake_case_ : jnp.ndarray , ):
return add_noise_common(state.common , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : str , snake_case_ : DDPMSchedulerState , snake_case_ : jnp.ndarray , snake_case_ : jnp.ndarray , snake_case_ : jnp.ndarray , ):
return get_velocity_common(state.common , snake_case_ , snake_case_ , snake_case_ )
def __len__(self : List[str] ):
return self.config.num_train_timesteps
| 90
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __A :
def __init__( self , a__ = None ):
if components is None:
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : str = list(a__ )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(a__ , self.__components ) ) + ")"
def __add__( self , a__ ):
_lowerCAmelCase : List[str] = len(self )
if size == len(a__ ):
_lowerCAmelCase : str = [self.__components[i] + other.component(a__ ) for i in range(a__ )]
return Vector(a__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , a__ ):
_lowerCAmelCase : int = len(self )
if size == len(a__ ):
_lowerCAmelCase : Optional[Any] = [self.__components[i] - other.component(a__ ) for i in range(a__ )]
return Vector(a__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , a__ ):
...
@overload
def __mul__( self , a__ ):
...
def __mul__( self , a__ ):
if isinstance(a__ , (float, int) ):
_lowerCAmelCase : List[Any] = [c * other for c in self.__components]
return Vector(a__ )
elif isinstance(a__ , a__ ) and len(self ) == len(a__ ):
_lowerCAmelCase : List[Any] = len(self )
_lowerCAmelCase : List[str] = [self.__components[i] * other.component(a__ ) for i in range(a__ )]
return sum(a__ )
else: # error case
raise Exception("""invalid operand!""" )
def __A ( self ):
return Vector(self.__components )
def __A ( self , a__ ):
if isinstance(a__ , a__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __A ( self , a__ , a__ ):
assert -len(self.__components ) <= pos < len(self.__components )
_lowerCAmelCase : int = value
def __A ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_lowerCAmelCase : int = [c**2 for c in self.__components]
return math.sqrt(sum(a__ ) )
def __A ( self , a__ , a__ = False ):
_lowerCAmelCase : List[Any] = self * other
_lowerCAmelCase : int = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Vector:
assert isinstance(_lowerCamelCase ,_lowerCamelCase )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ) -> Vector:
assert isinstance(_lowerCamelCase ,_lowerCamelCase ) and (isinstance(_lowerCamelCase ,_lowerCamelCase ))
_lowerCAmelCase : Union[str, Any] = [0] * dimension
_lowerCAmelCase : List[Any] = 1
return Vector(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : Vector ,_lowerCamelCase : Vector ) -> Vector:
assert (
isinstance(_lowerCamelCase ,_lowerCamelCase )
and isinstance(_lowerCamelCase ,_lowerCamelCase )
and (isinstance(_lowerCamelCase ,(int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Vector:
random.seed(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [random.randint(_lowerCamelCase ,_lowerCamelCase ) for _ in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
class __A :
def __init__( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = matrix
_lowerCAmelCase : Dict = w
_lowerCAmelCase : Optional[int] = h
def __str__( self ):
_lowerCAmelCase : Tuple = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , a__ ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : str = []
for i in range(self.__height ):
_lowerCAmelCase : List[Any] = [
self.__matrix[i][j] + other.component(a__ , a__ )
for j in range(self.__width )
]
matrix.append(a__ )
return Matrix(a__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , a__ ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : Any = []
for i in range(self.__height ):
_lowerCAmelCase : Any = [
self.__matrix[i][j] - other.component(a__ , a__ )
for j in range(self.__width )
]
matrix.append(a__ )
return Matrix(a__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , a__ ):
...
@overload
def __mul__( self , a__ ):
...
def __mul__( self , a__ ):
if isinstance(a__ , a__ ): # matrix-vector
if len(a__ ) == self.__width:
_lowerCAmelCase : int = zero_vector(self.__height )
for i in range(self.__height ):
_lowerCAmelCase : Dict = [
self.__matrix[i][j] * other.component(a__ )
for j in range(self.__width )
]
ans.change_component(a__ , sum(a__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(a__ , (int, float) ): # matrix-scalar
_lowerCAmelCase : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(a__ , self.__width , self.__height )
return None
def __A ( self ):
return self.__height
def __A ( self ):
return self.__width
def __A ( self , a__ , a__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __A ( self , a__ , a__ , a__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
_lowerCAmelCase : List[str] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __A ( self , a__ , a__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_lowerCAmelCase : Optional[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(a__ ) ):
_lowerCAmelCase : Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(a__ , self.__width - 1 , self.__height - 1 ).determinant()
def __A ( self , a__ , a__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(a__ , a__ )
else:
raise Exception("""Indices out of bounds""" )
def __A ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
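            # Laplace expansion along the first row:
            # det(A) = sum over y of A[0][y] * cofactor(0, y)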
_lowerCAmelCase : Dict = [
self.__matrix[0][y] * self.cofactor(0 , a__ ) for y in range(self.__width )
]
return sum(a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Matrix:
_lowerCAmelCase : list[list[float]] = [[0] * n for _ in range(_lowerCamelCase )]
return Matrix(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Matrix:
random.seed(_lowerCamelCase )
_lowerCAmelCase : list[list[float]] = [
[random.randint(_lowerCamelCase ,_lowerCamelCase ) for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )
]
return Matrix(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
| 44
|
'''simple docstring'''
import math
def A_ ( snake_case , snake_case ):
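    # Malus's law: the intensity transmitted through a polarizer at angle theta
    # is I = I_0 * cos^2(theta); e.g. an initial intensity of 100.0 at 60 degrees
    # transmits 100.0 * 0.5**2 = 25.0.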
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ):
'''simple docstring'''
UpperCAmelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Predict target for test data
UpperCAmelCase__ = xgb.predict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = predictions.reshape(len(SCREAMING_SNAKE_CASE__ ) , 1 )
return predictions
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = fetch_california_housing()
UpperCAmelCase__ , UpperCAmelCase__ = data_handling(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = train_test_split(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , test_size=0.25 , random_state=1 )
UpperCAmelCase__ = xgboost(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
print(F'''Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 354
|
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = F'''Expected string as input, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = F'''Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = input_str.split("""_""" )
UpperCAmelCase__ = 0 if use_pascal else 1
UpperCAmelCase__ = words[start_index:]
UpperCAmelCase__ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase__ = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 61
| 0
|
def __UpperCamelCase ( _lowerCAmelCase = 50 ) -> Optional[Any]:
"""simple docstring"""
A : Dict = [1] * (length + 1)
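    # Dynamic programming over row lengths: for each length, place the leftmost
    # block (length >= 3) at every possible start; the indexed term counts the
    # fills of what remains after the block plus one mandatory separator cell,
    # and the `+= 1` after each inner loop covers the block placed flush against
    # the row's end. The initial 1s count the all-empty row.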
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AlbertTokenizer
lowerCAmelCase__ = AlbertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase =AlbertTokenizer(_lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase ='this is a test'
__lowercase ='this is a test'
return input_text, output_text
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='<pad>'
__lowercase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '▁eloquent')
self.assertEqual(len(_lowerCAmelCase) , 3_0_0_0_0)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase ={'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `__lowercase` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
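# This test class can be run directly, e.g. (path assumed from the usual transformers layout):
#     python -m pytest tests/models/albert/test_tokenization_albert.py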
| 166
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power, compute the third via
    P = V * I (exactly one argument must be 0)."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" ,power / current )
elif current == 0:
return result("""current""" ,power / voltage )
elif power == 0:
return result("""power""" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple:
    """Return how many documents in `corpus` (newline-separated) contain `term`,
    together with the total number of documents."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
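# Worked example (function names as restored above):
#     term_frequency("to", "To be, or not to be")  # -> 2
#     inverse_document_frequency(1, 3)             # -> 0.477  (round(log10(3 / 1), 3))
#     tf_idf(2, 0.477)                             # -> 0.954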
| 30
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
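# With the `_LazyModule` indirection above, `MLukeTokenizer` is only materialized on
# first attribute access, so `import transformers` stays cheap even when sentencepiece
# is installed.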
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
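# Minimal usage sketch (checkpoint name is illustrative; any OWL-ViT checkpoint works):
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # -> BatchEncoding with input_ids, attention_mask and pixel_values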
| 298
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ )
snake_case = self._get_dummy_logits()
snake_case = 15
snake_case = -20.0
snake_case = -4.0
snake_case = processor.batch_decode(
lowercase_, beam_width=lowercase_, beam_prune_logp=lowercase_, token_min_logp=lowercase_, )
snake_case = decoded_processor_out.text
snake_case = list(lowercase_ )
with get_context('fork' ).Pool() as pool:
snake_case = decoder.decode_beams_batch(
lowercase_, lowercase_, beam_width=lowercase_, beam_prune_logp=lowercase_, token_min_logp=lowercase_, )
snake_case = [d[0][0] for d in decoded_decoder_out]
snake_case = [d[0][2] for d in decoded_decoder_out]
snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowercase_, lowercase_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'], lowercase_ )
self.assertTrue(np.array_equal(lowercase_, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], lowercase_, atol=1E-3 ) )
self.assertTrue(np.array_equal(lowercase_, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474], lowercase_, atol=1E-3 ) )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ )
snake_case = self._get_dummy_logits()
snake_case = 2.0
snake_case = 5.0
snake_case = -20.0
snake_case = True
snake_case = processor.batch_decode(
lowercase_, alpha=lowercase_, beta=lowercase_, unk_score_offset=lowercase_, lm_score_boundary=lowercase_, )
snake_case = decoded_processor_out.text
snake_case = list(lowercase_ )
decoder.reset_params(
alpha=lowercase_, beta=lowercase_, unk_score_offset=lowercase_, lm_score_boundary=lowercase_, )
with get_context('fork' ).Pool() as pool:
snake_case = decoder.decode_beams_batch(
lowercase_, lowercase_, )
snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowercase_, lowercase_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'], lowercase_ )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
snake_case = os.listdir(lowercase_ )
snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> List[str]:
snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
snake_case = WavaVecaProcessorWithLM.from_pretrained(lowercase_ )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
snake_case = os.listdir(lowercase_ )
snake_case = os.listdir(lowercase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
snake_case = floats_list((3, 1000) )
snake_case = processor_wavaveca(lowercase_, return_tensors='np' )
snake_case = processor_auto(lowercase_, return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 )
snake_case = self._get_dummy_logits()
snake_case = processor_wavaveca.batch_decode(lowercase_ )
snake_case = processor_auto.batch_decode(lowercase_ )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _lowerCamelCase ( self ) -> List[str]:
snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
snake_case = self._get_dummy_logits()[0]
snake_case = processor.decode(lowercase_, output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowercase_, lowercase_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'], 'word' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'word' ), ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'start_offset' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'end_offset' ), [1, 3, 5] )
def _lowerCamelCase ( self ) -> str:
snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
snake_case = self._get_dummy_logits()
snake_case = processor.batch_decode(lowercase_, output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowercase_, lowercase_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(lowercase_, 'word' ) ) for o in outputs['word_offsets']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'word' ), ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'start_offset' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'end_offset' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCamelCase ( self ) -> int:
import torch
snake_case = load_dataset('common_voice', 'en', split='train', streaming=lowercase_ )
snake_case = ds.cast_column('audio', datasets.Audio(sampling_rate=16000 ) )
snake_case = iter(lowercase_ )
snake_case = next(lowercase_ )
snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
snake_case = processor(sample['audio']['array'], return_tensors='pt' ).input_values
with torch.no_grad():
snake_case = model(lowercase_ ).logits.cpu().numpy()
snake_case = processor.decode(logits[0], output_word_offsets=lowercase_ )
snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowercase_, 'word' ) ), lowercase_ )
self.assertEqual(' '.join(self.get_from_offsets(lowercase_, 'word' ) ), output.text )
# output times
snake_case = torch.tensor(self.get_from_offsets(lowercase_, 'start_time' ) )
snake_case = torch.tensor(self.get_from_offsets(lowercase_, 'end_time' ) )
# fmt: off
snake_case = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
snake_case = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=0.01 ) )
self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=0.01 ) )
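# Typical decoding flow exercised by these tests (class names as imported above;
# the checkpoint is the same internal test repo used in the tests):
#     processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#     transcription = processor.batch_decode(logits).text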
| 332
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
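# Minimal usage sketch (argument values are illustrative):
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data")
#     dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#     batch = dataset[0]  # dict of tensors: input_ids, attention_mask, ...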
| 332
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 31
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact and save it as a zip file."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact zip files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error, keeping the failing tests per error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 290
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
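# Example renames performed by `convert_state_dict` (illustrative keys):
#     "emb.weight"              -> "rwkv.embeddings.weight"
#     "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#     "head.weight"             -> "head.weight"  (the LM head keeps its name)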
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
a_ : Tuple = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 367
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
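# Under `TYPE_CHECKING` the symbols above are imported only for static analysis;
# at runtime the `_LazyModule` defers the heavy torch/vision imports until first use.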
| 327
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = 'stabilityai/stable-diffusion-2'
_UpperCAmelCase : int = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCAmelCase , subfolder="""scheduler""" )
_UpperCAmelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
_UpperCAmelCase : Union[str, Any] = scheduler_params
_UpperCAmelCase : Dict = 'A painting of a squirrel eating a burger'
_UpperCAmelCase : Optional[Any] = jax.device_count()
_UpperCAmelCase : Dict = num_samples * [prompt]
_UpperCAmelCase : Dict = sd_pipe.prepare_inputs(__UpperCAmelCase )
_UpperCAmelCase : List[Any] = replicate(__UpperCAmelCase )
_UpperCAmelCase : List[Any] = shard(__UpperCAmelCase )
_UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
_UpperCAmelCase : Optional[int] = jax.random.split(__UpperCAmelCase , jax.device_count() )
_UpperCAmelCase : str = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=25 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_UpperCAmelCase : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase : Dict = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase : Optional[int] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
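# Illustrative sketch (not part of the original tests) of the device layout the
# tests above rely on: `replicate(params)` copies the weights to every device,
# while `shard(x)` reshapes the leading batch dimension to
# (num_devices, batch // num_devices, ...), roughly:
def _shard_like(x):
    n = jax.device_count()
    return x.reshape((n, x.shape[0] // n) + x.shape[1:])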
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
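# Illustrative usage (not part of the original module): pass 0 for the one
# unknown quantity; the numbers below are made up, not measurements.
def _demo_carrier_concentration() -> None:
    name, value = carrier_concentration(conductivity=25.0, electron_conc=100.0, mobility=0)
    assert name == "mobility"
    assert value == 25.0 / (100.0 * ELECTRON_CHARGE)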
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/table-transformer-detection': (
        'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
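# Illustrative sketch (not part of the original module): the `attribute_map`
# above aliases common config names onto the DETR-style ones, so the
# properties resolve as follows with the defaults:
def _demo_table_transformer_config() -> None:
    config = TableTransformerConfig(encoder_layers=2, decoder_layers=2)
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    assert config.hidden_size == config.d_model == 256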
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
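# Illustrative usage (not part of the original module): a 4-vertex graph where
# the 0-weight edge 0 -> 2 makes the shortest 0 -> 3 path cost 1.
def _demo_zero_one_bfs() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 3, 1)
    assert g.get_shortest_path(0, 3) == 1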
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two binary strings that differ in exactly one position, replacing
    the differing bit with '_'; return False if they differ in more positions.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a binary string of `no_of_variable` bits."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one row marks that row as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover whatever minterms remain
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
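# Illustrative usage (not part of the original module): minimize a
# three-variable function with minterms {0, 1, 2, 5, 6, 7} non-interactively.
def _demo_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))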
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr saves no artifacts usable for inspection yet, so finishing
        # without raising is the check
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()
        if fp16:
            args.extend(['--fp16'])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
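# Note (an illustration, not output from a run): for stage "zero2" on 2 GPUs
# the assembled command is roughly
#   deepspeed --num_nodes 1 --num_gpus 2 <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json
# where the paths in <> are placeholders resolved by TestCasePlus at runtime.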
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
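# Illustrative usage (not part of the original module):
# (1 + 2x + 3x^2) * (2 + 5x) = 2 + 9x + 16x^2 + 15x^3.
def _demo_fft_multiply() -> None:
    product = FFT(poly_a=[1, 2, 3], poly_b=[2, 5]).product
    assert [round(c.real) for c in product] == [2, 9, 16, 15]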
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the counts histogram."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A stand-in so the references below still resolve without PIL."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
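# Sketch (not part of the original tests) of the same pipeline outside the
# test harness; "cats.png" is a placeholder path, not a repository fixture:
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="cats.png", question="How many cats are there?", top_k=2)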
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into `b` by capitalizing some of
    its lowercase letters and deleting the remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
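# Illustrative usage (not part of the original module): 'daBcd' -> 'ABC' works
# by capitalizing 'a' and 'c' and deleting the lowercase 'd's; 'dBcd' cannot
# produce the required 'A' before the 'B'.
def _demo_abbr() -> None:
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False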
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed/compiled containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Block until every process reaches this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, once per node (or via xm.save on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased)."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for `obj` (qualname, name, or str fallback)."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Return True if something is already listening on `port` locally."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
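# Illustrative usage (not part of the original module) of two helpers above:
def _demo_utils() -> None:
    with patch_environment(master_port="29555"):
        assert os.environ["MASTER_PORT"] == "29555"
    assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"b": 1, "c": 2}}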
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
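# Illustrative usage (not part of the original module); expected values are
# approximate:
def _demo_resistors() -> None:
    assert abs(resistor_series([3.21389, 2, 3]) - 8.21389) < 1e-9
    assert abs(resistor_parallel([3.21389, 2, 3]) - 0.8737571620498019) < 1e-6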
if __name__ == "__main__":
import doctest
doctest.testmod()
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):  # mixin name assumed from the upstream test file
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
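# A quick standalone check of the patch arithmetic exercised by the shape
# tests above (pure Python; the 224/4 sizes are illustrative, not taken
# from the tester):
image_size, patch_size = (224, 224), (4, 4)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
# Note: the padding formula adds a full extra patch even when the size is
# already divisible, mirroring test_hidden_states_output_with_padding above.
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
assert num_patches == 3136 and padded_height == 228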
| 342
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
"""simple docstring"""
    @register_to_config
    def __init__( self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None ):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline( DiffusionPipeline ):
"""simple docstring"""
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings )
    def _encode_prompt( self, prompt, num_images_per_prompt, do_classifier_free_guidance ):
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__( self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ):
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps)}.""" )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
                    F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate( self, log_p_x_0: torch.FloatTensor, truncation_rate: float ):
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
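# A minimal 2-D sketch of the truncation idea above, assuming a recent
# PyTorch (the pipeline's `truncate` operates on (batch, classes, pixels)):
import torch

def truncate_log_probs(log_p, truncation_rate):
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # shift right so the single most likely class is always kept
    keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)
    # map the keep-mask back to the original (unsorted) class order
    keep = keep.gather(1, indices.argsort(1))
    out = log_p.clone()
    out[~keep] = -torch.inf
    return out

# truncate_log_probs(torch.tensor([[0.5, 0.3, 0.2]]).log(), 0.7) keeps the
# classes whose *preceding* cumulative mass is below 0.7, i.e. 0.5 and 0.3.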
| 5
|
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
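# Sanity check for the sample run above: [30, 35, 15, 5, 10, 20, 25] is the
# classic CLRS 15.2 instance, whose optimal cost is 15125 with
# parenthesization ((A1(A2A3))((A4A5)A6)).
matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125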
| 5
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = IFImgaImgSuperResolutionPipeline
snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
snake_case__ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
snake_case__ : int = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase_ ( self : Any ) -> Any:
return self._get_superresolution_dummy_components()
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=0 ) -> Optional[Any]:
if str(UpperCAmelCase__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase_ ( self : Dict ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase_ ( self : List[str] ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
self._test_save_load_local()
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
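# The MPS branch in get_dummy_inputs above reflects a common pattern:
# device-bound torch.Generator objects are not reliably supported on MPS,
# so tests seed the default CPU generator instead. A minimal sketch:
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        # torch.manual_seed returns the (now seeded) default CPU generator
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)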
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
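# The file above only fills _import_structure and hands it to _LazyModule.
# A simplified sketch of how such a lazy module resolves attributes on first
# access (illustrative only; the real _LazyModule also handles TYPE_CHECKING,
# extras, and failed imports):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value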
| 30
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _lowerCamelCase ( unittest.TestCase ):
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> List[Any]:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , )
return config, input_ids, attention_mask
def snake_case_ (self ) -> Dict:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = FlaxDistilBertModelTester(self )
@slow
def snake_case_ (self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained("distilbert-base-uncased" )
UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
@slow
def snake_case_ (self ) -> List[str]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCamelCase = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase = model(__a , attention_mask=__a )[0]
UpperCamelCase = (1, 11, 7_68)
self.assertEqual(output.shape , __a )
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
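# The integration check above as a hedged standalone snippet (requires flax
# and transformers, plus network access for the checkpoint):
import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
assert last_hidden_state.shape == (1, 11, 768)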
| 244
|
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _lowerCamelCase ( _lowercase ):
def __init__(self , **__a ) -> Optional[int]:
super().__init__(**__a )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def snake_case_ (self , **__a ) -> List[Any]:
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCamelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCamelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCamelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCamelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCamelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCamelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCamelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCamelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> str:
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def snake_case_ (self , __a , __a=64 , __a = 0 , __a = 5_12 / 15_00 , __a = 32 , __a = 1 , ) -> List[str]:
UpperCamelCase = load_image(__a )
UpperCamelCase = self.image_processor.size["longest_edge"]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCamelCase = self.image_processor(images=__a , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase = self.get_inference_context()
with inference_context():
UpperCamelCase = self._ensure_tensor_on_device(__a , device=self.device )
UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCamelCase = image_embeddings
UpperCamelCase = grid_points.shape[1]
UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __a , __a ):
UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase = input_labels[:, i : i + points_per_batch]
UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> int:
UpperCamelCase = model_inputs.pop("input_boxes" )
UpperCamelCase = model_inputs.pop("is_last" )
UpperCamelCase = model_inputs.pop("original_sizes" ).tolist()
UpperCamelCase = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCamelCase = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase = model_outputs["pred_masks"]
UpperCamelCase = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCamelCase = model_outputs["iou_scores"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 244
| 1
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class a__( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)])
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase)
lowerCAmelCase = GenerationConfig.from_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50)
self.assertEqual(loaded_config.max_length , 20)
self.assertEqual(loaded_config.max_time , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = AutoConfig.from_pretrained("""gpt2""")
lowerCAmelCase = GenerationConfig.from_model_config(__lowerCAmelCase)
lowerCAmelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = GenerationConfig()
lowerCAmelCase = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowerCAmelCase = copy.deepcopy(__lowerCAmelCase)
lowerCAmelCase = generation_config.update(**__lowerCAmelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase , {"""foo""": """bar"""})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = GenerationConfig()
lowerCAmelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""") as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = GenerationConfig.from_pretrained(__lowerCAmelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""")
lowerCAmelCase = GenerationConfig.from_model_config(__lowerCAmelCase)
assert not hasattr(__lowerCAmelCase , """foo""") # no new kwargs should be initialized if from config
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCAmelCase)
self.assertEqual(default_config.num_beams , 1)
lowerCAmelCase = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCAmelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class a__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a_ ( cls):
"""simple docstring"""
lowerCAmelCase = TOKEN
HfFolder.save_token(__lowerCAmelCase)
@classmethod
def a_ ( cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""")
except HTTPError:
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token)
lowerCAmelCase = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase))
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="""test-generation-config""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token)
lowerCAmelCase = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token)
lowerCAmelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase))
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token)
lowerCAmelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase))
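# The update() behaviour exercised above, as a standalone snippet (requires
# transformers; no network access needed):
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
unused = config.update(max_new_tokens=1024, foo="bar")
assert config.max_new_tokens == 1024  # valid attributes are applied in place
assert unused == {"foo": "bar"}       # unknown kwargs are handed back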
| 272
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nezha'''] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , a_ : Optional[int] , a_ : str=13 , a_ : str=7 , a_ : List[Any]=True , a_ : Tuple=True , a_ : Union[str, Any]=True , a_ : Optional[int]=True , a_ : Optional[Any]=99 , a_ : Tuple=32 , a_ : Dict=5 , a_ : List[Any]=4 , a_ : int=37 , a_ : Dict="gelu" , a_ : List[str]=0.1 , a_ : Union[str, Any]=0.1 , a_ : Optional[Any]=5_12 , a_ : List[str]=16 , a_ : Union[str, Any]=2 , a_ : Optional[int]=0.02 , a_ : Optional[Any]=3 , a_ : Union[str, Any]=4 , a_ : List[Any]=None , ):
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : str = seq_length
lowerCAmelCase_ : Union[str, Any] = is_training
lowerCAmelCase_ : int = use_input_mask
lowerCAmelCase_ : str = use_token_type_ids
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Tuple = intermediate_size
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : List[str] = type_vocab_size
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : Optional[int] = num_labels
lowerCAmelCase_ : Optional[Any] = num_choices
lowerCAmelCase_ : str = scope
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[str] = None
if self.use_input_mask:
lowerCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : str = None
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : Union[str, Any] ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def lowerCamelCase ( self : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : Optional[Any] , a_ : int , a_ : Any ):
lowerCAmelCase_ : Any = NystromformerModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Any = model(a_ , attention_mask=a_ , token_type_ids=a_ )
lowerCAmelCase_ : List[Any] = model(a_ , token_type_ids=a_ )
lowerCAmelCase_ : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : List[Any] , a_ : Dict , a_ : int , a_ : Union[str, Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : str , a_ : str ):
lowerCAmelCase_ : Any = NystromformerForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self : Optional[Any] , a_ : Union[str, Any] , a_ : Any , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Any ):
lowerCAmelCase_ : Any = NystromformerForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Any = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self : Any , a_ : List[str] , a_ : Any , a_ : Tuple , a_ : Optional[int] , a_ : List[str] , a_ : Tuple , a_ : Any ):
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Dict = NystromformerForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Any = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Optional[int] , a_ : str , a_ : Any , a_ : Optional[Any] , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] ):
lowerCAmelCase_ : str = self.num_labels
lowerCAmelCase_ : List[str] = NystromformerForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self : Union[str, Any] , a_ : List[Any] , a_ : Tuple , a_ : Optional[int] , a_ : Any , a_ : List[str] , a_ : str , a_ : Optional[int] ):
lowerCAmelCase_ : List[Any] = self.num_choices
lowerCAmelCase_ : Tuple = NystromformerForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : int = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a_ : Optional[Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : int = False
a_ : int = False
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[Any] = NystromformerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=a_ , hidden_size=37 )
def lowerCamelCase ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : str = type
self.model_tester.create_and_check_model(*a_ )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a_ )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
@slow
def lowerCamelCase ( self : int ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = NystromformerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
lowerCAmelCase_ : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowerCAmelCase_ : Optional[int] = model(a_ )[0]
lowerCAmelCase_ : List[str] = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , a_ )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = "the [MASK] of Belgium is Brussels"
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
lowerCAmelCase_ : Any = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
lowerCAmelCase_ : int = tokenizer(a_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase_ : Optional[int] = model(encoding.input_ids ).logits
lowerCAmelCase_ : Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(a_ ) , "capital" )
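# The masked-LM integration test above as a standalone snippet, with the
# [MASK] position computed instead of hard-coded (network access and the
# uw-madison/nystromformer-512 checkpoint assumed available):
import torch
from transformers import AutoTokenizer, NystromformerForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
inputs = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask_pos = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero().item()
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))  # expected: "capital"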
| 369
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : List[Any] = """lilt"""
def __init__( self : Any , a_ : List[str]=3_05_22 , a_ : List[Any]=7_68 , a_ : Tuple=12 , a_ : Tuple=12 , a_ : str=30_72 , a_ : Union[str, Any]="gelu" , a_ : Union[str, Any]=0.1 , a_ : List[Any]=0.1 , a_ : List[Any]=5_12 , a_ : List[str]=2 , a_ : int=0.02 , a_ : Optional[int]=1e-1_2 , a_ : Any=0 , a_ : str="absolute" , a_ : List[Any]=None , a_ : Optional[int]=4 , a_ : str=10_24 , **a_ : Union[str, Any] , ):
super().__init__(pad_token_id=a_ , **a_ )
lowerCAmelCase_ : List[str] = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Any = type_vocab_size
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Tuple = position_embedding_type
lowerCAmelCase_ : Union[str, Any] = classifier_dropout
lowerCAmelCase_ : Optional[Any] = channel_shrink_ratio
lowerCAmelCase_ : Dict = max_ad_position_embeddings
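# Hedged instantiation of the config above with its defaults (LiltConfig is
# the upstream transformers name for this class):
from transformers import LiltConfig

config = LiltConfig()
assert config.model_type == "lilt"
assert (config.hidden_size, config.num_hidden_layers) == (768, 12)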
| 161
| 0
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
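# The reader above backs the "text" packaged module; the equivalent
# high-level entry point is load_dataset (file path illustrative):
from datasets import load_dataset

ds = load_dataset("text", data_files="my_corpus.txt", split="train")
print(ds[0]["text"])  # one example per line of the input file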
| 323
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pos_x
SCREAMING_SNAKE_CASE : Any = pos_y
SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : Tuple = goal_x
SCREAMING_SNAKE_CASE : List[str] = goal_y
SCREAMING_SNAKE_CASE : Optional[Any] = g_cost
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = node
SCREAMING_SNAKE_CASE : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node
SCREAMING_SNAKE_CASE : Any = current_fwd_node
SCREAMING_SNAKE_CASE : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = BidirectionalAStar(init, goal)
__UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
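# Quick numeric check of the two heuristics toggled by HEURISTIC above:
from math import sqrt

dx, dy = 3, 4
assert abs(dx) + abs(dy) == 7      # Manhattan (HEURISTIC == 1)
assert sqrt(dx**2 + dy**2) == 5.0  # Euclidean (HEURISTIC == 0)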
| 323
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 1_00 ):
"""simple docstring"""
    sum_cubes = (lowercase * (lowercase + 1) // 2) ** 2
    sum_squares = lowercase * (lowercase + 1) * (2 * lowercase + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
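# A quick brute-force sanity check of the closed forms used above; the helper
# name _brute is illustrative. For n = 10 the difference is 2640, the value
# stated in the Project Euler problem.
def _brute(n):
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert _brute(10) == 2640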
| 364
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
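# An independent cross-check of the dice comparison above, enumerating both
# players' totals exactly; all names here are illustrative. Peter rolls nine
# 4-sided dice, Colin six 6-sided dice, and Peter wins on a strictly higher total.
from fractions import Fraction
from itertools import product as _product

def _win_probability():
    _peter = [sum(r) for r in _product(range(1, 5), repeat=9)]
    _colin = [sum(r) for r in _product(range(1, 7), repeat=6)]
    _counts = [0] * 37
    for t in _colin:
        _counts[t] += 1
    _below = [0] * 38  # _below[t] = number of Colin totals strictly less than t
    for t in range(1, 38):
        _below[t] = _below[t - 1] + _counts[t - 1]
    wins = sum(_below[t] for t in _peter)
    return Fraction(wins, len(_peter) * len(_colin))

assert abs(float(_win_probability()) - 0.5731441) < 1e-7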
| 30
| 0
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"allenai/led-base-16384": 16_384,
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = LEDTokenizer
__lowerCamelCase : int = ['input_ids', 'attention_mask']
def __init__(self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space:
_a = getattr(A , pre_tok_state.pop('''type''' ) )
_a = add_prefix_space
_a = pre_tok_class(**A )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = '''post_processor'''
_a = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state['''sep'''] )
if "cls" in state:
_a = tuple(state['''cls'''] )
_a = False
if state.get('''add_prefix_space''' , A ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get('''trim_offsets''' , A ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(A , state.pop('''type''' ) )
_a = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def a__ (self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def a__ (self , A ) -> Any:
"""simple docstring"""
_a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
_a = value
def a__ (self , *A , **A ) -> BatchEncoding:
"""simple docstring"""
_a = kwargs.get('''is_split_into_words''' , A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*A , **A )
def a__ (self , *A , **A ) -> BatchEncoding:
"""simple docstring"""
_a = kwargs.get('''is_split_into_words''' , A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*A , **A )
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(A , name=A )
return tuple(A )
def a__ (self , A , A=None ) -> str:
"""simple docstring"""
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ (self , A , A = None , A = PaddingStrategy.DO_NOT_PAD , A = None , A = None , ) -> dict:
"""simple docstring"""
_a = super()._pad(
encoded_inputs=A , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
# Load from model defaults
if return_attention_mask is None:
_a = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_a = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
_a = len(encoded_inputs['''global_attention_mask'''] ) != len(A )
if needs_to_be_padded:
_a = len(A ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_a = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_a = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
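# A minimal sketch of the rule the padding override above applies to
# `global_attention_mask`: pad with -1 so the appended positions receive local
# attention only (the helper name and values below are illustrative).
def _pad_global_attention_mask(mask, target_len, padding_side="right"):
    diff = target_len - len(mask)
    if diff <= 0:
        return mask
    return mask + [-1] * diff if padding_side == "right" else [-1] * diff + mask

assert _pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask([1, 0], 4, "left") == [-1, -1, 1, 0]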
| 211
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
_a = new_id
    # turn into NumPy arrays
_a = np.array(__A)
_a = np.array(__A)
if reduce_labels:
_a = 255
_a = label - 1
_a = 255
_a = label != ignore_index
_a = np.not_equal(__A , __A)
_a = pred_label[mask]
_a = np.array(__A)[mask]
_a = pred_label[pred_label == label]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
for result, gt_seg_map in zip(__A , __A):
_a , _a , _a , _a = intersect_and_union(
__A , __A , __A , __A , __A , __A)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = None , __A = False , ):
"""simple docstring"""
_a , _a , _a , _a = total_intersect_and_union(
__A , __A , __A , __A , __A , __A)
# compute metrics
_a = {}
_a = total_area_intersect.sum() / total_area_label.sum()
_a = total_area_intersect / total_area_union
_a = total_area_intersect / total_area_label
_a = np.nanmean(__A)
_a = np.nanmean(__A)
_a = all_acc
_a = iou
_a = acc
if nan_to_num is not None:
_a = {metric: np.nan_to_num(__A , nan=__A) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def a__ (self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def a__ (self , A , A , A , A , A = None , A = None , A = False , ) -> List[Any]:
"""simple docstring"""
_a = mean_iou(
results=A , gt_seg_maps=A , num_labels=A , ignore_index=A , nan_to_num=A , label_map=A , reduce_labels=A , )
return iou_result
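# A toy per-class IoU computed with plain NumPy, mirroring the logic of
# intersect_and_union above on an arbitrary 2x2 example.
import numpy as np

_pred = np.array([[0, 1], [1, 1]])
_gt = np.array([[0, 1], [0, 1]])
for _cls in (0, 1):
    _inter = np.sum((_pred == _cls) & (_gt == _cls))
    _union = np.sum((_pred == _cls) | (_gt == _cls))
    print(_cls, _inter / _union)  # class 0 -> 0.5, class 1 -> 2/3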
| 211
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Tuple=None ) -> Optional[Any]:
__snake_case = None
if token is not None:
__snake_case = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
__snake_case = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
__snake_case = requests.get(snake_case_ , headers=snake_case_ ).json()
__snake_case = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__snake_case = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(snake_case_ ):
__snake_case = requests.get(url + f"""&page={i + 2}""" , headers=snake_case_ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Any=None ) -> List[Any]:
__snake_case = None
if token is not None:
__snake_case = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
__snake_case = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
__snake_case = requests.get(snake_case_ , headers=snake_case_ ).json()
__snake_case = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
__snake_case = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(snake_case_ ):
__snake_case = requests.get(url + f"""&page={i + 2}""" , headers=snake_case_ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Union[str, Any] ) -> Union[str, Any]:
__snake_case = None
if token is not None:
__snake_case = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
__snake_case = requests.get(snake_case_ , headers=snake_case_ , allow_redirects=snake_case_ )
__snake_case = result.headers['''Location''']
__snake_case = requests.get(snake_case_ , allow_redirects=snake_case_ )
__snake_case = os.path.join(snake_case_ , f"""{artifact_name}.zip""" )
with open(snake_case_ , '''wb''' ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any]=None ) -> str:
__snake_case = []
__snake_case = []
__snake_case = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
__snake_case = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__snake_case = line[: line.index(''': ''' )]
__snake_case = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
                                    # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
__snake_case = line[len('''FAILED ''' ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
__snake_case = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` """
f"""and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
''' problem.''' )
__snake_case = None
if job_name and job_links:
__snake_case = job_links.get(snake_case_ , snake_case_ )
# A list with elements of the form (line of error, error, failed test)
__snake_case = [x + [y] + [job_link] for x, y in zip(snake_case_ , snake_case_ )]
return result
def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : List[Any]=None ) -> Dict:
__snake_case = []
__snake_case = [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_ , job_links=snake_case_ ) )
return errors
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ) -> str:
__snake_case = Counter()
counter.update([x[1] for x in logs] )
__snake_case = counter.most_common()
__snake_case = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__snake_case = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    __snake_case = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=snake_case_ ) )
return r
def lowerCamelCase__ ( snake_case_ : List[Any] ) -> Tuple:
__snake_case = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
__snake_case = test.split('''/''' )[2]
else:
__snake_case = None
return test
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : Union[str, Any]=None ) -> Optional[int]:
__snake_case = [(x[0], x[1], get_model(x[2] )) for x in logs]
__snake_case = [x for x in logs if x[2] is not None]
__snake_case = {x[2] for x in logs}
__snake_case = {}
for test in tests:
__snake_case = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__snake_case = counter.most_common()
__snake_case = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__snake_case = sum(error_counts.values() )
if n_errors > 0:
__snake_case = {'''count''': n_errors, '''errors''': error_counts}
    __snake_case = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=snake_case_ ) )
return r
def lowerCamelCase__ ( snake_case_ : Any ) -> Union[str, Any]:
__snake_case = '''| no. | error | status |'''
__snake_case = '''|-:|:-|:-|'''
__snake_case = [header, sep]
for error in reduced_by_error:
__snake_case = reduced_by_error[error]['''count''']
__snake_case = f"""| {count} | {error[:100]} | |"""
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCamelCase__ ( snake_case_ : List[Any] ) -> Tuple:
__snake_case = '''| model | no. of errors | major error | count |'''
__snake_case = '''|-:|-:|-:|-:|'''
__snake_case = [header, sep]
for model in reduced_by_model:
__snake_case = reduced_by_model[model]['''count''']
__snake_case , __snake_case = list(reduced_by_model[model]['''errors'''].items() )[0]
__snake_case = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
snake_case_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case_ = get_job_links(args.workflow_run_id, token=args.token)
snake_case_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
snake_case_ = k.find(' / ')
snake_case_ = k[index + len(' / ') :]
snake_case_ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case_ = reduce_by_error(errors)
snake_case_ = reduce_by_model(errors)
snake_case_ = make_github_table(reduced_by_error)
snake_case_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
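# A small sketch of the error-grouping idea used above: log rows are
# (error_line, error, failed_test) triples and errors are ranked with
# collections.Counter (the rows below are made up for illustration).
from collections import Counter as _Counter

_rows = [
    ("a.py:1", "AssertionError", "tests/models/bert/test_m.py::test_x"),
    ("a.py:9", "AssertionError", "tests/models/bert/test_m.py::test_y"),
    ("b.py:3", "ImportError", "tests/models/gpt2/test_n.py::test_z"),
]
print(_Counter(row[1] for row in _rows).most_common())
# [('AssertionError', 2), ('ImportError', 1)]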
| 238
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = ['input_ids', 'attention_mask']
A_ : Optional[Any] = None
def __init__(self : Optional[int] , a__ : int=None , a__ : str=None , a__ : Any=None , a__ : List[Any]="<unk>" , a__ : List[Any]="<s>" , a__ : Optional[int]="</s>" , a__ : List[str]="<pad>" , a__ : Union[str, Any]=False , a__ : str=False , **a__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , add_prefix_space=a__ , clean_up_tokenization_spaces=a__ , **a__ , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , a__ ) != add_prefix_space:
__snake_case = getattr(a__ , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**a__ )
__snake_case = add_prefix_space
def a (self : int , *a__ : Tuple , **a__ : Optional[Any] ):
"""simple docstring"""
__snake_case = kwargs.get('''is_split_into_words''' , a__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*a__ , **a__ )
def a (self : List[str] , *a__ : List[str] , **a__ : List[str] ):
"""simple docstring"""
__snake_case = kwargs.get('''is_split_into_words''' , a__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._encode_plus(*a__ , **a__ )
def a (self : List[Any] , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def a (self : Tuple , a__ : "Conversation" ):
"""simple docstring"""
__snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
return input_ids
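# A sketch of the conversation flattening performed by the last method above:
# each turn's ids get an EOS appended, then the whole sequence is truncated
# from the left to the model's maximum length (names and values are illustrative).
def _build_conversation_ids(turn_ids, eos_id, max_len):
    flat = []
    for ids in turn_ids:
        flat.extend(ids + [eos_id])
    return flat[-max_len:] if len(flat) > max_len else flat

assert _build_conversation_ids([[1, 2], [3]], eos_id=0, max_len=4) == [2, 0, 3, 0]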
| 238
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : str = '''timm_backbone'''
def __init__( self : int ,_a : Union[str, Any]=None ,_a : Union[str, Any]=3 ,_a : str=True ,_a : str=True ,_a : List[str]=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
_a : Union[str, Any] = backbone
_a : Union[str, Any] = num_channels
_a : List[Any] = features_only
_a : Tuple = use_pretrained_backbone
_a : Any = True
_a : Optional[int] = out_indices if out_indices is not None else (-1,)
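# Roughly what this backbone config carries, as a plain-dict sketch; the keys
# follow the attributes set in __init__ above, and the model name is illustrative.
_timm_backbone_cfg = {
    "backbone": "resnet18",
    "num_channels": 3,
    "features_only": True,
    "use_pretrained_backbone": True,
    "out_indices": (-1,),  # the default: last feature map only
}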
| 271
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class UpperCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple ,_a : T ):
'''simple docstring'''
_a : List[str] = data
_a : Node[T] | None = None
def __str__( self : Dict ):
'''simple docstring'''
return F"""{self.data}"""
class UpperCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
_a : Node[T] | None = None
def __iter__( self : str ):
'''simple docstring'''
_a : Tuple = self.top
while node:
yield node.data
_a : int = node.next
def __str__( self : str ):
'''simple docstring'''
return "->".join([str(_a ) for item in self] )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __lowercase ( self : str ):
'''simple docstring'''
return self.top is None
def __lowercase ( self : List[Any] ,_a : T ):
'''simple docstring'''
_a : int = Node(_a )
if not self.is_empty():
_a : Optional[Any] = self.top
_a : List[str] = node
def __lowercase ( self : Tuple ):
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top ,_a )
_a : List[Any] = self.top
_a : int = self.top.next
return pop_node.data
def __lowercase ( self : List[str] ):
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
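# A minimal independent LIFO stand-in for comparison (in this dump the stack's
# push/pop/peek/clear methods all share one obfuscated name, so later
# definitions shadow earlier ones; the names below are illustrative).
class _MiniStack:
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)
    def pop(self):
        if not self._items:
            raise IndexError("pop from empty stack")
        return self._items.pop()
    def peek(self):
        if not self._items:
            raise IndexError("peek from empty stack")
        return self._items[-1]

_s = _MiniStack()
_s.push(3)
_s.push(5)
assert _s.peek() == 5 and _s.pop() == 5 and _s.pop() == 3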
| 271
| 1
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _snake_case ( ) -> int:
'''simple docstring'''
_A = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_A = Dataset.from_dict(_snake_case )
return dataset
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = get_dataset()
_A = make_duplicate_clusters(_UpperCAmelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def lowerCAmelCase_ ( self : str ):
_A = get_dataset()
_A , _A = deduplicate_dataset(_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
print(_UpperCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , _UpperCAmelCase )
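# The 0.85 passed above is presumably a Jaccard-similarity threshold for the
# MinHash clusters; a quick illustrative definition of that similarity:
def _jaccard(a, b):
    return len(a & b) / len(a | b)

assert _jaccard({"a", "b"}, {"a", "b", "c"}) == 2 / 3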
| 364
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _snake_case ( _snake_case : int ) -> Any:
'''simple docstring'''
random.seed(_snake_case )
np.random.seed(_snake_case )
torch.manual_seed(_snake_case )
torch.cuda.manual_seed_all(_snake_case )
# ^^ safe to call this function even if cuda is not available
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Iterable[torch.nn.Parameter] , _UpperCAmelCase : float = 0.9999 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[float, int] = 1.0 , _UpperCAmelCase : Union[float, int] = 2 / 3 , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Dict[str, Any] = None , **_UpperCAmelCase : Optional[int] , ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_A = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_A = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_A = True
if kwargs.get('max_value' , _UpperCAmelCase ) is not None:
_A = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_A = kwargs['max_value']
if kwargs.get('min_value' , _UpperCAmelCase ) is not None:
_A = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_A = kwargs['min_value']
_A = list(_UpperCAmelCase )
_A = [p.clone().detach() for p in parameters]
if kwargs.get('device' , _UpperCAmelCase ) is not None:
_A = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
self.to(device=kwargs['device'] )
_A = None
_A = decay
_A = min_decay
_A = update_after_step
_A = use_ema_warmup
_A = inv_gamma
_A = power
_A = 0
_A = None # set in `step()`
_A = model_cls
_A = model_config
@classmethod
def lowerCAmelCase_ ( cls : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ):
_A , _A = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase )
_A = model_cls.from_pretrained(_UpperCAmelCase )
_A = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(_UpperCAmelCase )
return ema_model
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_A = self.model_cls.from_config(self.model_config )
_A = self.state_dict()
state_dict.pop('shadow_params' , _UpperCAmelCase )
model.register_to_config(**_UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : int ):
_A = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_A = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_A = (1 + step) / (10 + step)
_A = min(_UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
_A = max(_UpperCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_A = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_A = parameters.parameters()
_A = list(_UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_A = self.get_decay(self.optimization_step )
_A = decay
_A = 1 - decay
_A = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_A = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
_A = list(_UpperCAmelCase )
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Dict=None ):
_A = [
p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self : Dict ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
_A = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
_A = None
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : dict ):
_A = copy.deepcopy(_UpperCAmelCase )
_A = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_A = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , _UpperCAmelCase ):
raise ValueError('Invalid min_decay' )
_A = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , _UpperCAmelCase ):
raise ValueError('Invalid optimization_step' )
_A = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , _UpperCAmelCase ):
raise ValueError('Invalid update_after_step' )
_A = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _UpperCAmelCase ):
raise ValueError('Invalid use_ema_warmup' )
_A = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
_A = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
_A = state_dict.get('shadow_params' , _UpperCAmelCase )
if shadow_params is not None:
_A = shadow_params
if not isinstance(self.shadow_params , _UpperCAmelCase ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(_UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
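# A standalone sketch of the decay schedule computed above (the step here is
# already offset by update_after_step, as in the method): with warmup it is
# 1 - (1 + step / inv_gamma) ** -power, otherwise (1 + step) / (10 + step),
# clamped between min_decay and decay; the shadow update in step() is then
# s_param -= (1 - decay) * (s_param - param). Defaults mirror __init__.
def _ema_decay(step, decay=0.9999, min_decay=0.0,
               use_ema_warmup=False, inv_gamma=1.0, power=2 / 3):
    if step <= 0:
        return 0.0
    cur = 1 - (1 + step / inv_gamma) ** -power if use_ema_warmup else (1 + step) / (10 + step)
    return max(min(cur, decay), min_decay)

assert _ema_decay(1) == 2 / 11
assert 0.0 < _ema_decay(100, use_ema_warmup=True) < 0.9999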
| 271
| 0
|
import torch
def A_ ( ) -> Optional[int]:
'''simple docstring'''
if torch.cuda.is_available():
__UpperCamelCase = torch.cuda.device_count()
else:
__UpperCamelCase = 0
print(f"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
| 328
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
def __init__( self , a , a , a , a , a , a=0.2 , a=0.2 ) -> Dict:
lowercase__ : Any = bp_numa
lowercase__ : Optional[int] = bp_numa
lowercase__ : Tuple = bp_numa
lowercase__ : Optional[Any] = conva_get[:2]
lowercase__ : Optional[int] = conva_get[2]
lowercase__ : Optional[Any] = size_pa
lowercase__ : Union[str, Any] = rate_w
lowercase__ : Union[str, Any] = rate_t
lowercase__ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : Any = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
# save model dict with pickle
lowercase__ : Optional[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(a , 'wb' ) as f:
pickle.dump(a , a )
print(f"""Model saved: {save_path}""" )
@classmethod
def _UpperCAmelCase ( cls , a ) -> Any:
# read saved model
with open(a , 'rb' ) as f:
lowercase__ : Optional[int] = pickle.load(a ) # noqa: S301
lowercase__ : Optional[int] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase__ : List[Any] = model_dic.get('size_pooling1' )
lowercase__ : Tuple = model_dic.get('num_bp1' )
lowercase__ : int = model_dic.get('num_bp2' )
lowercase__ : int = model_dic.get('num_bp3' )
lowercase__ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase__ : Tuple = model_dic.get('rate_thre' )
# create model instance
lowercase__ : Tuple = CNN(a , a , a , a , a , a , a )
# modify model parameter
lowercase__ : str = model_dic.get('w_conv1' )
lowercase__ : Optional[int] = model_dic.get('wkj' )
lowercase__ : Tuple = model_dic.get('vji' )
lowercase__ : str = model_dic.get('thre_conv1' )
lowercase__ : Union[str, Any] = model_dic.get('thre_bp2' )
lowercase__ : List[str] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self , a ) -> str:
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self , a ) -> Any:
return round(a , 3 )
def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[str]:
# convolution process
lowercase__ : int = convs[0]
lowercase__ : Optional[Any] = convs[1]
lowercase__ : int = np.shape(a )[0]
# get the data slice of original image data, data_focus
lowercase__ : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , a ):
for j_focus in range(0 , size_data - size_conv + 1 , a ):
lowercase__ : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(a )
        # calculate the feature map of every single kernel, and save it as a list of matrices
lowercase__ : Union[str, Any] = []
lowercase__ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(a ):
lowercase__ : Any = []
for i_focus in range(len(a ) ):
lowercase__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(a ) )
lowercase__ : Optional[Any] = np.asmatrix(a ).reshape(
a , a )
data_featuremap.append(a )
        # expanding the data slice to one dimension
lowercase__ : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(a ) )
lowercase__ : int = np.asarray(a )
return focus_list, data_featuremap
def _UpperCAmelCase ( self , a , a , a="average_pool" ) -> str:
# pooling process
lowercase__ : List[str] = len(featuremaps[0] )
lowercase__ : List[str] = int(size_map / size_pooling )
lowercase__ : str = []
for i_map in range(len(a ) ):
lowercase__ : List[str] = featuremaps[i_map]
lowercase__ : Optional[int] = []
for i_focus in range(0 , a , a ):
for j_focus in range(0 , a , a ):
lowercase__ : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(a ) )
lowercase__ : List[Any] = np.asmatrix(a ).reshape(a , a )
featuremap_pooled.append(a )
return featuremap_pooled
def _UpperCAmelCase ( self , a ) -> List[str]:
        # expanding three-dimensional data into a one-dimensional list
lowercase__ : Any = []
for i in range(len(a ) ):
lowercase__ : Optional[int] = np.shape(data[i] )
lowercase__ : int = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__ : str = data_listed.getA().tolist()[0]
data_expanded.extend(a )
lowercase__ : int = np.asarray(a )
return data_expanded
def _UpperCAmelCase ( self , a ) -> Dict:
        # expanding a matrix into a one-dimensional list
lowercase__ : Dict = np.asarray(a )
lowercase__ : Union[str, Any] = np.shape(a )
lowercase__ : Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[Any]:
lowercase__ : Dict = []
lowercase__ : int = 0
for i_map in range(a ):
lowercase__ : str = np.ones((size_map, size_map) )
for i in range(0 , a , a ):
for j in range(0 , a , a ):
lowercase__ : Optional[Any] = pd_pool[
i_pool
]
lowercase__ : Union[str, Any] = i_pool + 1
lowercase__ : List[Any] = np.multiply(
a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(a )
return pd_all
def _UpperCAmelCase ( self , a , a , a , a , a , a=bool ) -> str:
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(a )) )
print((' - - Shape: Teach_Data ', np.shape(a )) )
lowercase__ : int = 0
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
lowercase__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(a ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ : Optional[int] = np.asmatrix(datas_train[p] )
lowercase__ : int = np.asarray(datas_teach[p] )
lowercase__ , lowercase__ : Union[str, Any] = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[Any] = self.pooling(a , self.size_poolinga )
lowercase__ : Tuple = np.shape(a )
lowercase__ : List[str] = self._expand(a )
lowercase__ : Optional[int] = data_bp_input
lowercase__ : Optional[Any] = np.dot(a , self.vji.T ) - self.thre_bpa
lowercase__ : str = self.sig(a )
lowercase__ : Tuple = np.dot(a , self.wkj.T ) - self.thre_bpa
lowercase__ : Any = self.sig(a )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase__ : int = np.multiply(
(data_teach - bp_outa) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Any = np.multiply(
np.dot(a , self.wkj ) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Optional[int] = np.dot(a , self.vji )
lowercase__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ : Any = pd_conva_pooled.T.getA().tolist()
lowercase__ : List[str] = self._calculate_gradient_from_pool(
a , a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ : Tuple = self.rate_weight * np.dot(a , a )
lowercase__ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
lowercase__ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error of each single image
lowercase__ : Dict = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ : str = rp + 1
lowercase__ : List[str] = error_count / patterns
all_mse.append(a )
def draw_error():
lowercase__ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(a , '+-' )
plt.plot(a , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(a , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self , a ) -> List[Any]:
        # model prediction
lowercase__ : Optional[int] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(a )) )
for p in range(len(a ) ):
lowercase__ : List[str] = np.asmatrix(datas_test[p] )
lowercase__ , lowercase__ : Tuple = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Any = self.pooling(a , self.size_poolinga )
lowercase__ : Union[str, Any] = self._expand(a )
lowercase__ : Optional[Any] = data_bp_input
lowercase__ : str = bp_outa * self.vji.T - self.thre_bpa
lowercase__ : Optional[Any] = self.sig(a )
lowercase__ : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ : List[str] = self.sig(a )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ : Optional[int] = [list(map(self.do_round , a ) ) for each in produce_out]
return np.asarray(a )
def _UpperCAmelCase ( self , a ) -> List[str]:
        # return the image data after the convolution step so it can be inspected
lowercase__ : Any = np.asmatrix(a )
lowercase__ , lowercase__ : str = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Tuple = self.pooling(a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
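# An independent sketch of the valid-convolution + sigmoid step performed in
# the convolute method above (thresholds omitted): each kernel-sized window is
# multiplied elementwise, summed, then squashed by a sigmoid.
import numpy as np

def _conv_valid_sigmoid(img, kernel, step=1):
    k = kernel.shape[0]
    out = [
        [float(np.sum(img[i:i + k, j:j + k] * kernel))
         for j in range(0, img.shape[1] - k + 1, step)]
        for i in range(0, img.shape[0] - k + 1, step)
    ]
    return 1 / (1 + np.exp(-np.asarray(out)))

print(_conv_valid_sigmoid(np.ones((4, 4)), np.ones((2, 2))).shape)  # (3, 3)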
| 77
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : Any = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
SCREAMING_SNAKE_CASE : Tuple = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE : List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
SCREAMING_SNAKE_CASE : Optional[int] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Dict:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Dict:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->List[str]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : List[Any] = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : int = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Tuple = image_processor(_lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : List[Any] = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = '''lower newer'''
SCREAMING_SNAKE_CASE : int = processor(text=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = '''lower newer'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[str] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : Optional[int] = processor.batch_decode(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = '''lower newer'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[str] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
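# Hedged usage sketch of the round trip these tests exercise; the model id is
# illustrative, and actually running it needs downloaded weights, so the calls
# stay commented out here.
# from transformers import CLIPProcessor
# from PIL import Image
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                    return_tensors="pt")
# print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values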
| 19
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Tuple = '''▁'''
a__ : List[Any] = {'''vocab_file''': '''spiece.model'''}
a__ : Optional[Any] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
a__ : str = {
'''google/pegasus-xsum''': 512,
}
a__ : str = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<pad>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<mask_2>" , _lowerCamelCase="<mask_1>" , _lowerCamelCase=None , _lowerCamelCase=103 , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
SCREAMING_SNAKE_CASE : Dict = offset
if additional_special_tokens is not None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError(
F"""additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"""
F""" {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(_lowerCamelCase ) , self.offset - 1 )
]
if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE : Dict = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : List[str] = mask_token_sent
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __lowerCAmelCase ( self ) ->int:
return len(self.sp_model ) + self.offset
def __lowerCAmelCase ( self ) ->Dict[str, int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.piece_to_id(_lowerCamelCase )
return sp_id + self.offset
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE : Dict = self.sp_model.IdToPiece(index - self.offset )
return token
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->str:
return 1
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
if already_has_special_tokens:
return self._special_token_mask(_lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(_lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : int = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
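
# Usage sketch (assumes network access to the public "google/pegasus-xsum"
# checkpoint referenced in the maps above):
#
#   from transformers import PegasusTokenizer
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("PEGASUS was pretrained with gap sentences.").input_ids
#   # ids ends with eos_token_id == 1; SentencePiece ids are shifted by tok.offset (103)
#   text = tok.decode(ids, skip_special_tokens=True)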
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings used for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
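
# Usage sketch (not part of the library file above). It assumes the public
# "microsoft/vq-diffusion-ithq" checkpoint; any VQ-Diffusion checkpoint works:
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
#   image.save("teddy_bear.png")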
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of
    `input_string` in O(n) time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
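
# Worked example: for "forgeeksskeegfor" the augmented string is
# "f|o|r|g|e|e|k|s|s|k|e|e|g|f|o|r"; the widest center sits on the "|" between
# the two "s" characters, and stripping the "|" separators back out gives:
#
#   >>> palindromic_string("forgeeksskeegfor")
#   'geeksskeeg'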
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
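
# What the lazy module buys you (illustrative; requires torch for the modeling
# classes):
#
#   from transformers import ErnieConfig, ErnieModel
#
#   config = ErnieConfig()       # imports configuration_ernie on first access
#   model = ErnieModel(config)   # imports modeling_ernie on first access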
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model-doc table of content by removing duplicates and sorting models."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean each one
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
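
# Quick illustration of clean_model_doc_toc on toy entries (duplicate "local"
# keys with an identical title collapse to one entry, output is title-sorted):
#
#   >>> clean_model_doc_toc(
#   ...     [{"local": "model_doc/bert", "title": "BERT"},
#   ...      {"local": "model_doc/albert", "title": "ALBERT"},
#   ...      {"local": "model_doc/bert", "title": "BERT"}]
#   ... )
#   [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]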
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class a__ ( unittest.TestCase , UpperCAmelCase ):
"""simple docstring"""
if is_tf_available():
UpperCAmelCase__ : Optional[Any] ={
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def _lowercase ( self : int ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Tuple = 2
class a__ ( tf.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->str:
"""simple docstring"""
super(UpperCAmelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase__ , )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : Any = [[2, 0], [1_0_2, 1_0_3]]
SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE : Dict = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} )
SCREAMING_SNAKE_CASE : Optional[int] = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE : int = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE : Tuple = serving_func(**UpperCAmelCase__ )["""sequences"""]
SCREAMING_SNAKE_CASE : List[str] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowercase ( self : Dict ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : int = 2
class a__ ( tf.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
super(UpperCAmelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase__ , )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : List[Any] = [[2], [1_0_2, 1_0_3]]
SCREAMING_SNAKE_CASE : List[Any] = [[1], [1, 1]]
SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} )
SCREAMING_SNAKE_CASE : int = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""]
for input_row in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE : str = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE : List[str] = serving_func(**UpperCAmelCase__ )["""sequences"""]
SCREAMING_SNAKE_CASE : List[Any] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_text
def _lowercase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCAmelCase__ )
class a__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , """spiece.model""" ) , """rb""" ).read() )
SCREAMING_SNAKE_CASE : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def _lowercase ( self : int , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.tokenize(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
SCREAMING_SNAKE_CASE : str = complete_model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 1_0,
"""temperature""": 0.7,
}
SCREAMING_SNAKE_CASE : Tuple = 1_4
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : List[Any] = """Hello, my dog is cute and"""
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Dict = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : int = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE : Dict = [6_3_8, 1_9_8]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : Dict = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _lowercase ( self : str ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : List[Any] = """Hugging Face is a technology company based in New York and Paris."""
SCREAMING_SNAKE_CASE : Optional[int] = bart_tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE : int = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ ).numpy()
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def _lowercase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict ) ->List[str]:
"""simple docstring"""
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class a__ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE : Tuple = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo="""bar""" )
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: a VQ-VAE decoder around a UNet denoised with DDIM."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
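
# Usage sketch (assumes the public "CompVis/ldm-celebahq-256" checkpoint):
#
#   from diffusers import LDMPipeline
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")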
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        input_text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
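
    # Why those expected tokens: with the toy merges above ("l o", "lo w</w>",
    # "e r</w>"), "lower" reduces as l o w e r</w> -> lo w er</w> (the "lo w</w>"
    # merge cannot fire because "w" is not word-final there), and "newer" reduces
    # to n e w er</w>; the ids [10, 2, 16, 9, 3, 2, 16, 20] then come straight
    # from each token's position in the vocab list.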
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Hubble parameter H(z) from the Friedmann equation, for a universe described
    by the given relative densities and Hubble constant.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
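
# Worked check of the demo above: with radiation 1e-4, matter 0.3 and dark
# energy 0.7, curvature = 1 - 1.0001 = -1e-4, so at z = 0:
#   E^2 = 1e-4 + 0.3 - 1e-4 + 0.7 = 1.0  ->  H(0) = 68.3 * sqrt(1.0) = 68.3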
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Net present value of a series of cash flows at the given discount rate,
    rounded to two decimal places.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
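
# Worked example: an initial outlay of 1,000 followed by 500 and 600 in the
# next two periods, discounted at 5%:
#   -1000 + 500/1.05 + 600/1.05**2 = -1000 + 476.19 + 544.22 = 20.41
# i.e. present_value(0.05, [-1000, 500, 600]) == 20.41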
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
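
# Usage sketch (toy features; `datasets` must be installed):
#
#   from datasets import Audio, Features, Value
#
#   feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   task = AutomaticSpeechRecognition().align_with_features(feats)  # adopts the dataset's Audio config
#   task.column_mapping  # {'audio': 'audio', 'transcription': 'transcription'}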
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap over Node objects that also tracks each node's index, so
    decrease_key works in O(log n).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
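
# A small derived utility (not in the original file): because `remove` always
# pops the minimum, draining the heap yields the nodes sorted ascending by
# `val`, i.e. an O(n log n) heap sort over Node objects.
def heap_sorted_nodes(nodes):
    heap = MinHeap(list(nodes))
    ordered = []
    while not heap.is_empty():
        ordered.append(heap.remove())
    return ordered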
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowerCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Dict = """\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"""
@dataclass
class A__ ( __snake_case ):
_UpperCAmelCase :Union[PIL.Image.Image, np.ndarray]
class A__ ( __snake_case ):
def __init__( self , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE_ , image_encoder=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , renderer=SCREAMING_SNAKE_CASE_ , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if latents is None:
UpperCamelCase : Optional[Any] = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCamelCase : Dict = latents.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase : Union[str, Any] = torch.device(F"""cuda:{gpu_id}""" )
UpperCamelCase : str = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(image[0] , torch.Tensor ):
UpperCamelCase : Dict = torch.cat(SCREAMING_SNAKE_CASE_ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE_ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
UpperCamelCase : Tuple = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
UpperCamelCase : List[str] = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = self.image_encoder(SCREAMING_SNAKE_CASE_ )["last_hidden_state"]
UpperCamelCase : Any = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCamelCase : Optional[Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase : List[str] = torch.zeros_like(SCREAMING_SNAKE_CASE_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE_ )
def __call__( self , A_ , A_ = 1 , A_ = 25 , A_ = None , A_ = None , A_ = 4.0 , A_ = 64 , A_ = "pil" , A_ = True , ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
UpperCamelCase : Tuple = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
UpperCamelCase : str = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCamelCase : str = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE_ )}""" )
UpperCamelCase : int = self._execution_device
UpperCamelCase : Optional[Any] = batch_size * num_images_per_prompt
UpperCamelCase : int = guidance_scale > 1.0
UpperCamelCase : Optional[Any] = self._encode_image(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = self.scheduler.timesteps
UpperCamelCase : Optional[int] = self.prior.config.num_embeddings
UpperCamelCase : int = self.prior.config.embedding_dim
UpperCamelCase : Dict = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCamelCase : Union[str, Any] = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : Optional[int] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.prior(
SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , proj_embedding=SCREAMING_SNAKE_CASE_ , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCamelCase : str = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE_ ):
print()
UpperCamelCase : Dict = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE_ )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
UpperCamelCase : List[str] = images.cpu().numpy()
if output_type == "pil":
UpperCamelCase : List[str] = [self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE_ )
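
# Illustrative sketch (added for clarity, not part of the pipeline above): the
# classifier-free-guidance mix applied inside the denoising loop, shown on toy tensors.
# With guidance_scale > 1.0 the prediction is pushed away from the unconditional branch.
def _cfg_mix_example():
    cond = torch.randn(2, 4)  # stand-in for the conditional prediction
    uncond = torch.zeros(2, 4)  # stand-in for the unconditional prediction
    guidance_scale = 4.0
    return uncond + guidance_scale * (cond - uncond)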
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
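
# Illustrative sketch (added for clarity, not part of the original module): using the
# helper on a plain, non-quantized parameter. Note `torch`/`nn` are only imported above
# when bitsandbytes is available, so this assumes a bitsandbytes install.
def _example_set_module_tensor():
    layer = nn.Linear(4, 4)
    new_weight = torch.ones(4, 4)
    set_module_quantized_tensor_to_device(layer, "weight", "cpu", value=new_weight)
    assert torch.equal(layer.weight.data, new_weight)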
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
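
# Illustrative usage sketch (added for clarity, not part of the original module). The toy
# model below is hypothetical; actually running the quantized layers needs a CUDA device
# with bitsandbytes installed. `BitsAndBytesConfig.quantization_method()` returns
# "llm_int8" for 8-bit configs, which selects the Linear8bitLt branch above.
def _example_replace_with_bnb_linear():
    from transformers import BitsAndBytesConfig

    config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
    # Every nn.Linear whose name is not skipped is now a bnb.nn.Linear8bitLt with
    # empty (meta) weights, waiting for a state dict to be loaded.
    return replace_with_bnb_linear(model, quantization_config=config)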
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
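
# Illustrative usage (added for clarity, not part of the original module): this helper
# decides which modules stay in full precision during quantized loading. For a typical
# causal LM with tied input/output embeddings it returns the head module name; the model
# below is only an example and would be downloaded on first use.
#
#     from transformers import AutoModelForCausalLM
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     print(get_keys_to_not_convert(model))  # e.g. ['lm_head']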
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_in_pt_msgpack(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
|
def nor_gate(input_1: int, input_2: int) -> int:
    """
    NOR gate: outputs 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block: self-attention, optional cross-attention, feed-forward."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer: projection in (with a gated or plain GELU), dropout, projection out."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation, with optional tanh approximation via `approximate="tanh"`."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation with GELU as the gate."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Norm layer with adaptive layer norm zero (adaLN-Zero) conditioning."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
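
# Illustrative sketch (added for clarity, not part of the original module): a standalone
# forward pass through a small BasicTransformerBlock with the default layer_norm / GEGLU
# configuration; no cross-attention dim is given, so attn2 is None and only
# self-attention plus the feed-forward run.
def _example_transformer_block():
    block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
    hidden_states = torch.randn(2, 16, 32)  # (batch, sequence, dim)
    return block(hidden_states)  # output has the same shape as the input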
|
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
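
# Illustrative check (added for clarity, not part of the original script): sorting a
# fixed list without the interactive prompt, e.g. for a quick sanity test.
def _example_quick_sort():
    data = [5, 1, 4, 2, 8, 0]
    quick_sort_random(data, 0, len(data))
    return data  # -> [0, 1, 2, 4, 5, 8]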
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
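
# Illustrative usage (added for clarity, not part of the original module): GPT-2's
# byte-level BPE folds the leading space into tokens, so pretokenized input requires
# instantiating the tokenizer with `add_prefix_space=True` (downloads the "gpt2" files
# on first use):
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     tok(["Hello", "world"], is_split_into_words=True)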
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given an uppercase string that represents an Excel sheet column title,
    return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
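
# Worked example (added for clarity): the title is read right-to-left with increasing
# powers of 26, e.g. "AB" -> 2 * 26**0 + 1 * 26**1 = 28, since "A" maps to 1 and
# "B" maps to 2 via ord(ch) - 64.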
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def A__ ( UpperCAmelCase_ ):
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def A__ ( UpperCAmelCase_ ):
class lowercase__ :
def __init__( self : Any ,lowerCamelCase__ : Any ):
'''simple docstring'''
_UpperCamelCase : int = metric_id
class lowercase__ :
lowercase__ = [MetricMock(__a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if "tmp_path" in args:
_UpperCamelCase : List[Any] = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(UpperCAmelCase_ , match='https://huggingface.co/docs/evaluate' ):
func(*UpperCAmelCase_ )
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase , _UpperCamelCase : Optional[int] = np.shape(UpperCAmelCase_ )
if rows != columns:
_UpperCamelCase : Union[str, Any] = (
'\'table\' has to be of square shaped array but got a '
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = np.zeros((rows, columns) )
_UpperCamelCase : Tuple = np.zeros((rows, columns) )
for i in range(UpperCAmelCase_ ):
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(UpperCAmelCase_ ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
_UpperCamelCase : Optional[Any] = (table[i][j] - total) / upper[j][j]
_UpperCamelCase : int = 1
for j in range(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[int] = sum(lower[i][k] * upper[k][j] for k in range(UpperCAmelCase_ ) )
_UpperCamelCase : Tuple = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
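
# Illustrative check (added for clarity, not part of the original script): the factors
# returned for a Doolittle-decomposable matrix multiply back to the input.
def _example_lu_roundtrip():
    table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(table)
    assert np.allclose(lower @ upper, table)
    return lower, upper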
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', "beit.embeddings.cls_token"),
(F'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"),
(F'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"),
(F'{prefix}pos_embed', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight')
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias')

        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1')
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2')
        state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
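
# Example invocation (added for clarity; the script filename is illustrative, the URL is
# the script's default checkpoint):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base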
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
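# A short usage sketch. The checkpoint name comes from the pretrained map above;
# the sample sentence is an illustrative assumption. Guarded so importing the
# module stays side-effect free:
if __name__ == "__main__":
    demo_tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    demo_batch = demo_tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
    # Per set_src_lang_special_tokens, input_ids end with [..., </s> id, en_XX id].
    print(demo_batch["input_ids"])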
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
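# Worked example (symbols chosen for illustration): for the tuple
# ("h", "e", "l", "l", "o"), get_pairs returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.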
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens):
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
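# Illustration of convert_tokens_to_string (tokens are made up for the example):
# ["hel@@", "lo", "wor@@", "ld"] joins to "hel@@ lo wor@@ ld"; splitting on the
# "@@ " merge marker and re-joining the pieces yields "hello world".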
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_to_text'] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
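# How the lazy pattern behaves (sketch; the package path is shown for
# illustration): importing this package is cheap because the heavy submodules
# listed in _import_structure are only resolved when an attribute is first
# accessed, e.g.
#   from transformers.models.speech_to_text import Speech2TextConfig
# triggers the real import of `configuration_speech_to_text` at that moment.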
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
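# A minimal sketch of how the template is applied (the dataset column name
# "content" is a made-up example):
#
#   LanguageModeling(text_column="content").column_mapping  # -> {"content": "text"}
#
# `datasets` uses this mapping to rename the column to the schema name "text".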
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
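# Shape sketch (under the conventions assumed in the restoration above): for a
# protein with num_res residues, protein["aatype"] is [num_res]; the function adds
#   residx_atom14_to_atom37 -> [num_res, 14]  (atom37 index of each atom14 slot)
#   residx_atom37_to_atom14 -> [num_res, 37]  (inverse gather indices)
#   atom14_atom_exists      -> [num_res, 14]  (1.0 where the slot is a real atom)
#   atom37_atom_exists      -> [num_res, 37]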
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name, parser_only, secondary_filename=None, special_strings=None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n        ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n        ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n        ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n        ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n            examples/by_feature/cross_validation.py\n            --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n            ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Creates a pair of train/eval `DataLoader`s for the glue MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
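# Padding illustration (shapes are made-up examples): with padding="longest", a
# batch whose longest sequence has 37 tokens yields input_ids of shape
# [batch_size, 37]; with padding="max_length" and max_length=128 (the TPU
# branch above), every batch is [batch_size, 128].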
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
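# Hypothetical launch commands for this script (script name, config file, and
# paths are all assumed for illustration):
#
#   accelerate launch --config_file ds_config.yaml test_checkpointing.py \
#       --num_epochs 2 --output_dir ./ckpts
#   accelerate launch --config_file ds_config.yaml test_checkpointing.py \
#       --resume_from_checkpoint ./ckpts/epoch_0 --output_dir ./ckpts
#
# The resumed run evaluates immediately and asserts that accuracy, scheduler lr,
# and optimizer lr match the values recorded in state_0.json, then returns.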
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating x**(z-1) * e**(-x)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
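    # Quick sanity checks (standard gamma identities, evaluated numerically):
    print(gamma(5))    # ~24.0, since gamma(n) == (n-1)! for positive integers
    print(gamma(0.5))  # ~1.7724538509, i.e. sqrt(pi)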
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place by alternating odd- and even-indexed comparison passes."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bloom_fast"""] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = '''upernet'''

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('''model_type''')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
ROMAN = [
    (1000, '''M'''),
    (900, '''CM'''),
    (500, '''D'''),
    (400, '''CD'''),
    (100, '''C'''),
    (90, '''XC'''),
    (50, '''L'''),
    (40, '''XL'''),
    (10, '''X'''),
    (9, '''IX'''),
    (5, '''V'''),
    (4, '''IV'''),
    (1, '''I'''),
]


def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer, handling subtractive pairs like IV."""
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral by greedily consuming the ROMAN table."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
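    # Worked examples (values chosen for illustration):
    print(roman_to_int("MMXIV"))  # 2014 = M + M + X + (IV = 5 - 1)
    print(int_to_roman(2014))     # "MMXIV"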
import numpy as np
class Cell:
    """A cell in the world, with a position, a parent link, and A* parameters g, h, f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world, here a world_size[0] x world_size[1] grid."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of cell (including diagonals)."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal, using squared euclidean distance as the heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
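    # With diagonal moves allowed and the squared-distance heuristic above, one
    # plausible result for the 5x5 default world is the diagonal path
    # [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], which marks 1.0 along the grid
    # diagonal. Ties may break differently depending on np.argmin order, so
    # treat this as illustrative output rather than a guaranteed result.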
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
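# A minimal usage sketch (step counts and rates are made-up values):
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )
#
# The learning rate ramps linearly (power=1.0) from 0 to 5e-5 over the first 500
# steps, then follows the polynomial decay down to init_lr * min_lr_ratio.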
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied after the parameter update."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
snake_case_ = apply_state or {}
snake_case_ = apply_state.get((var_device, var_dtype) )
if coefficients is None:
snake_case_ = self._fallback_apply_state(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple=None ) -> Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , _lowerCAmelCase )
snake_case_ = self._decay_weights_op(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCAmelCase , self )._resource_apply_dense(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=None ) -> Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , _lowerCAmelCase )
snake_case_ = self._decay_weights_op(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCAmelCase , self )._resource_apply_sparse(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_lowerCAmelCase , _lowerCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_lowerCAmelCase , _lowerCAmelCase ) is not None:
return False
return True
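# Illustration (variable names assumed): with the exclude patterns wired above, a
# variable such as "encoder/layer_0/kernel" receives weight decay, while
# "encoder/LayerNorm/beta" or any "...bias" variable is skipped because re.search
# matches one of the exclude patterns.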
class __lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = []
snake_case_ = None
@property
def lowerCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
if self._accum_steps is None:
snake_case_ = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=_lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
if not self._gradients:
snake_case_ = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_lowerCAmelCase ) , trainable=_lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_lowerCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(_lowerCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , _lowerCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_lowerCAmelCase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_lowerCAmelCase ) )
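# A usage sketch (assumed, not from the original file) of the accumulator defined above:
#   accumulator = __lowerCAmelCase()                      # the gradient-accumulator class
#   for step, batch in enumerate(batches):                # `batches` is hypothetical
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, batch)             # hypothetical helper
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accum_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()                           # i.e. the reset method above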
| 159
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 159
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
def __init__(self : Dict , *__UpperCAmelCase : str , **__UpperCAmelCase : str ) -> None:
"""simple docstring"""
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 354
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = '▁'
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = BigBirdTokenizer
__UpperCAmelCase : Optional[int] = BigBirdTokenizerFast
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : List[Any] = True
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
super().setUp()
UpperCAmelCase__ = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "<s>"
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 )
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = "I was born in 92000, and this is falsé."
UpperCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowercase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = "Hello World!"
UpperCAmelCase__ = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase__ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def lowercase_ (self : List[str] ) -> int:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
UpperCAmelCase__ = " ".join(__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = BigBirdConfig(attention_type="original_full" )
UpperCAmelCase__ = BigBirdModel(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
UpperCAmelCase__ = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase_ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 143
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(__UpperCAmelCase )
def __call__( self : List[Any] , __UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__UpperCAmelCase : Any ):
'''simple docstring'''
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , **__UpperCAmelCase : str ):
'''simple docstring'''
return {}, {}, {}
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
_A = load_image(__UpperCAmelCase )
_A = image.size
_A = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ):
'''simple docstring'''
_A = self.model(**__UpperCAmelCase )
return model_outputs
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = model_outputs.predicted_depth
_A = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__UpperCAmelCase )
_A = prediction.squeeze().cpu().numpy()
_A = (output * 255 / np.max(__UpperCAmelCase )).astype("uint8" )
_A = Image.fromarray(__UpperCAmelCase )
_A = {}
_A = predicted_depth
_A = depth
return output_dict
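# A usage sketch (assumed): the pipeline above is normally reached through the factory,
#   from transformers import pipeline
#   depth_estimator = pipeline(task="depth-estimation")
#   result = depth_estimator("path/or/url/to/image.jpg")  # hypothetical input
#   result["depth"].save("depth.png")  # PIL image assembled in the postprocess step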
| 79
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_snake_case = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = mock.Mock()
_lowerCAmelCase : int = 500
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : str = HTTPError
_lowerCAmelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=__a) as mock_head:
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # Check that we did indeed call the fake head request
mock_head.assert_called()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
def snake_case__ ( self):
'''simple docstring'''
with self.assertRaises(__a):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
_lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
self.assertIsNotNone(__a)
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(__a)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
_lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
_lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = len(_SCREAMING_SNAKE_CASE )
_snake_case = len(matrix[0] )
_snake_case = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for row in range(_SCREAMING_SNAKE_CASE ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _SCREAMING_SNAKE_CASE ):
_snake_case = matrix[col][row] / matrix[row][row]
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
_snake_case = True
for i in range(row + 1 , _SCREAMING_SNAKE_CASE ):
if matrix[i][row] != 0:
_snake_case, _snake_case = matrix[i], matrix[row]
_snake_case = False
break
if reduce:
rank -= 1
for i in range(_SCREAMING_SNAKE_CASE ):
_snake_case = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
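# Worked example (assumed inputs) for the elimination above: [[1, 2], [2, 4]] has a
# second row that is a multiple of the first, so it is zeroed out and the rank is 1,
# while [[1, 0], [0, 1]] keeps two non-zero pivots and has rank 2.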
| 352
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = "nat"
lowerCAmelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=64 , UpperCAmelCase=[3, 4, 6, 5] , UpperCAmelCase=[2, 4, 8, 16] , UpperCAmelCase=7 , UpperCAmelCase=3.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> str:
super().__init__(**UpperCAmelCase )
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(UpperCAmelCase )
_snake_case = num_heads
_snake_case = kernel_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = layer_norm_eps
_snake_case = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_snake_case = int(embed_dim * 2 ** (len(UpperCAmelCase ) - 1) )
_snake_case = layer_scale_init_value
_snake_case = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase ) + 1 )]
_snake_case, _snake_case = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names )
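# A usage sketch (class name assumed from the upstream library): NatConfig() reproduces
# the defaults above, e.g. depths=[3, 4, 6, 5] and num_heads=[2, 4, 8, 16]; note that
# hidden_size is derived in __init__ as embed_dim * 2 ** (len(depths) - 1) = 512.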
| 270
| 0
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase_ : Union[str, Any] = data_utils.TransfoXLTokenizer
lowercase_ : int = data_utils.TransfoXLCorpus
lowercase_ : Dict = data_utils
lowercase_ : Dict = data_utils
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , "rb" ) as fp:
_UpperCAmelCase = pickle.load(snake_case_ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCAmelCase = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
_UpperCAmelCase = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
_UpperCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , snake_case_ )
_UpperCAmelCase = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCAmelCase = os.path.abspath(snake_case_ )
_UpperCAmelCase = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCAmelCase = TransfoXLConfig()
else:
_UpperCAmelCase = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = TransfoXLLMHeadModel(snake_case_ )
_UpperCAmelCase = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowercase_ : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
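# Example invocation (hypothetical paths) matching the argparse flags defined above:
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /ckpts/transfo-xl/model.ckpt \
#       --transfo_xl_config_file /ckpts/transfo-xl/config.json \
#       --pytorch_dump_folder_path /out/transfo-xl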
| 133
|
from typing import Any
class __lowerCAmelCase :
def __init__( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = data
_UpperCAmelCase = None
class __lowerCAmelCase :
def __init__( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = None
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = self.head
while temp is not None:
print(temp.data , end=" " )
_UpperCAmelCase = temp.next
print()
def UpperCamelCase ( self : Any , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = Node(snake_case__ )
_UpperCAmelCase = self.head
_UpperCAmelCase = new_node
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
if node_a is None or node_a is None:
return
_UpperCAmelCase , _UpperCAmelCase = node_a.data, node_a.data
if __name__ == "__main__":
lowercase_ : Union[str, Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 133
| 1
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_lowerCAmelCase : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
_lowerCAmelCase : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
_lowerCAmelCase : int = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
_lowerCAmelCase : Optional[str] = field(default=lowerCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def snake_case ( self ):
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_lowerCAmelCase : str = field(
default=lowerCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
_lowerCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case ,snake_case ,snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case ,snake_case ,snake_case = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
datasets.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case = data_args.train_file.split('.' )[-1]
snake_case = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case = load_dataset('csv' , data_files=_UpperCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case = load_dataset('json' , data_files=_UpperCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case = raw_datasets['train'].features['label'].names
snake_case = len(_UpperCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCamelCase , )
snake_case = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case = {'Refused': 0, 'Entailed': 1}
snake_case = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_UpperCamelCase : Optional[Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_UpperCamelCase : Tuple ):
snake_case = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
snake_case = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
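        # Illustration (assumed input): a raw table string such as "col1#col2\ncell1#cell2"
        # splits on newlines into rows and on "#" into cells, yielding a DataFrame whose
        # columns are ["col1", "col2"] with a single data row ["cell1", "cell2"].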
snake_case = examples['statement']
snake_case = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case = tokenizer(_UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase )
snake_case = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case = raw_datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_UpperCamelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : EvalPrediction ):
snake_case = p.predictions[0] if isinstance(p.predictions , _UpperCamelCase ) else p.predictions
snake_case = np.argmax(_UpperCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case = default_data_collator
    elif training_args.fp16:
snake_case = DataCollatorWithPadding(_UpperCamelCase , pad_to_multiple_of=8 )
else:
snake_case = None
# Initialize our Trainer
snake_case = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
snake_case = None
if training_args.resume_from_checkpoint is not None:
snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case = last_checkpoint
snake_case = trainer.train(resume_from_checkpoint=_UpperCamelCase )
snake_case = train_result.metrics
snake_case = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
snake_case = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _UpperCamelCase )
trainer.save_metrics('train' , _UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case = trainer.evaluate(eval_dataset=_UpperCamelCase )
snake_case = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase )
snake_case = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('eval' , _UpperCamelCase )
trainer.save_metrics('eval' , _UpperCamelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
snake_case = predict_dataset.remove_columns('label' )
snake_case = trainer.predict(_UpperCamelCase , metric_key_prefix='predict' ).predictions
snake_case = np.argmax(_UpperCamelCase , axis=1 )
snake_case = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_UpperCamelCase ):
snake_case = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
SCREAMING_SNAKE_CASE__ = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149
| 0
|
'''simple docstring'''
import math
import random
def __lowerCamelCase ( _lowercase , _lowercase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
a : Union[str, Any] = 0.0_2
def __lowerCamelCase ( _lowercase , _lowercase ) -> float:
UpperCAmelCase : Any = float(2 * (random.randint(1 , 1_0_0 )) - 1 )
for _ in range(_lowercase ):
# Forward propagation
UpperCAmelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCAmelCase : List[str] = (expected / 1_0_0) - layer_a
# Error delta
UpperCAmelCase : Dict = layer_1_error * sigmoid_function(_lowercase , _lowercase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_0_0
if __name__ == "__main__":
import doctest
doctest.testmod()
a : List[str] = int(input("""Expected value: """))
a : List[str] = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 265
|
'''simple docstring'''
from itertools import count
def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
UpperCAmelCase : Any = [1] * min_block_length
for n in count(_lowercase ):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
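# Context (assumed reading of the recurrence above): fill_count_functions[n] counts the
# ways to fill a row of length n with blocks of at least min_block_length units separated
# by gaps of one or more, and solution() returns the first n where that count exceeds
# one million.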
| 265
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Any = {'vocab_file': 'vocab.txt'}
__lowerCamelCase : Union[str, Any] = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__lowerCamelCase : int = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__lowerCamelCase : List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class A__ ( lowerCAmelCase__ ):
_UpperCAmelCase :int = VOCAB_FILES_NAMES
_UpperCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :List[Any] = ConvBertTokenizer
def __init__( self , A_=None , A_=None , A_=True , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_=True , A_=None , **A_ , ):
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("strip_accents" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("type" ) )
UpperCamelCase : Any = do_lower_case
UpperCamelCase : Dict = strip_accents
UpperCamelCase : int = tokenize_chinese_chars
UpperCamelCase : str = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = do_lower_case
def __UpperCamelCase( self , A_ , A_=None ):
'''simple docstring'''
UpperCamelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : List[Any] = [self.sep_token_id]
UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
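# A usage sketch (class name assumed to be ConvBertTokenizerFast as upstream): any of
# the checkpoints mapped above can back the fast tokenizer, e.g.
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   ids = tokenizer("ConvBERT uses span-based dynamic convolution.")["input_ids"]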
| 355
|
import os
def A_ ( ) -> int:
    UpperCamelCase : List[str] = os.path.dirname(os.path.realpath(__file__ ) )
UpperCamelCase : Any = os.path.join(_lowerCAmelCase , "triangle.txt" )
with open(_lowerCAmelCase ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
UpperCamelCase : Tuple = []
for line in triangle:
UpperCamelCase : List[str] = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(_lowerCAmelCase ) )
a.append(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
for j in range(len(a[i] ) ):
UpperCamelCase : List[Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0
UpperCamelCase : Dict = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(_lowerCAmelCase , _lowerCAmelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 140
| 0
|
import re
from filelock import FileLock
try:
import nltk
_lowerCamelCase =True
except (ImportError, ModuleNotFoundError):
_lowerCamelCase =False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def _a ( lowerCamelCase ):
re.sub("""<n>""", """""", lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCamelCase ) )
| 287
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _a ( lowerCamelCase ):
return "".join(sorted(lowerCamelCase ) )
def _a ( lowerCamelCase ):
return word_by_signature[signature(lowerCamelCase )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287
| 1
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        '''simple docstring'''
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        '''simple docstring'''
        image_embeds = self.vision_model(clip_input)[0]
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
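A rough smoke test of the checker (hypothetical shapes and randomly initialized weights; a real pipeline feeds clip_input produced by its image processor):
# Random init via the default CLIPConfig; both heads emit one logit per image.
config = CLIPConfig()
checker = IFSafetyChecker(config)
images = np.random.rand(2, 64, 64, 3).astype("float32")
clip_input = torch.randn(2, 3, config.vision_config.image_size, config.vision_config.image_size)
images, nsfw_detected, watermark_detected = checker(clip_input, images)
print(nsfw_detected, watermark_detected)  # two lists of booleans, one per image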
| 361
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in the input strings,
    # increment its count for the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 318
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 80
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
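The point of the _LazyModule indirection is that heavy submodules are only imported when one of their exported names is first accessed. A toy stand-in (not the transformers implementation) showing the mechanism:
import importlib
import types

class ToyLazyModule(types.ModuleType):
    """Maps exported names to the module defining them; imports on demand."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = ToyLazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"lazy": True}))  # the json submodule is only resolved here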
| 80
| 1
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
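A hypothetical invocation of the script (the script file name and all paths are placeholders):
# python convert_ldm_checkpoint.py \
#     --checkpoint_path ./ldm/model.ckpt \
#     --config_file ./ldm/config.json \
#     --dump_path ./converted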
| 350
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
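The renaming rules above map raw RWKV keys onto the transformers layout; a few illustrative before/after pairs:
# Illustrative keys only; the LM head is the one key left un-prefixed.
examples = {
    "emb.weight": "rwkv.embeddings.weight",
    "blocks.0.ln0.weight": "rwkv.blocks.0.pre_ln.weight",
    "blocks.3.att.time_mix_k": "rwkv.blocks.3.attention.time_mix_key",
    "blocks.3.ffn.time_mix_r": "rwkv.blocks.3.feed_forward.time_mix_receptance",
    "head.weight": "head.weight",
}
for old, new in examples.items():
    print(f"{old} -> {new}")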
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5
| 0
|
"""simple docstring"""
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
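count_divisors can be spot-checked on small inputs; 28 = 2^2 * 7 is the first triangular number with more than five divisors:
assert count_divisors(28) == 6          # 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) == 576  # first triangular number with > 500 divisors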
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = 'vit'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1E-4
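A quick usage sketch (the defaults correspond to ViT-base):
config = ViTConfig(image_size=384)
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 384
onnx_config = ViTOnnxConfig(config)
print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}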
| 278
| 0
|
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
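Because the angle is reduced modulo 2*pi before summing, the truncated series stays accurate even for large theta; a quick check against the standard library:
from math import cos, sin

for theta in (0.5, 2.0, 10.0, -10.0):
    assert abs(maclaurin_sin(theta) - sin(theta)) < 1e-6
    assert abs(maclaurin_cos(theta) - cos(theta)) < 1e-6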
| 302
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    '''simple docstring'''
    def _info(self) -> datasets.MetricInfo:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ], )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 302
| 1
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order, ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'part_id = {partition_id}').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        '''simple docstring'''
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        '''simple docstring'''
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        '''simple docstring'''
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]
        if self._spark.conf.get('spark.master', '').startswith('local'):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        '''simple docstring'''
        import pyspark
        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, 'batch_bytes: long')
            .agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int, ):
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long')
            .groupBy('task_id')
            .agg(
                pyspark.sql.functions.sum('num_examples').alias('total_num_examples'), pyspark.sql.functions.sum('num_bytes').alias('total_num_bytes'), pyspark.sql.functions.count('num_bytes').alias('num_shards'), pyspark.sql.functions.collect_list('num_examples').alias('shard_lengths'), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : "datasets.SplitGenerator" ,lowerCamelCase__ : str = "arrow" ,lowerCamelCase__ : Optional[Union[str, int]] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,):
'''simple docstring'''
self._validate_cache_dir()
_UpperCamelCase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = not is_remote_filesystem(self._fs )
_UpperCamelCase : Optional[int] = os.path.join if is_local else posixpath.join
_UpperCamelCase : Dict = '-TTTTT-SSSSS-of-NNNNN'
_UpperCamelCase : Union[str, Any] = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
_UpperCamelCase : Tuple = path_join(self._output_dir ,lowerCamelCase__ )
_UpperCamelCase : int = 0
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 0
_UpperCamelCase : Dict = []
_UpperCamelCase : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ):
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : Optional[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = total_num_examples
_UpperCamelCase : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
_UpperCamelCase : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_UpperCamelCase : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,):
rename(
lowerCamelCase__ ,fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,fpath.replace('TTTTT-SSSSS' ,F'{global_shard_id:05d}' ).replace('NNNNN' ,F'{total_shards:05d}' ) ,)
_UpperCamelCase : List[Any] = []
_UpperCamelCase : Optional[int] = 0
for i in range(len(lowerCamelCase__ ) ):
_UpperCamelCase , _UpperCamelCase : str = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase__ ,len(lowerCamelCase__ ) ).map(lambda lowerCamelCase__ : _rename_shard(*lowerCamelCase__ ) ).collect()
else:
# don't use any pattern
_UpperCamelCase : Dict = 0
_UpperCamelCase : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,fpath.replace(lowerCamelCase__ ,'' ) ,)
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : "datasets.SplitGenerator" ,):
'''simple docstring'''
return SparkExamplesIterable(self.df )
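This builder is what backs Dataset.from_spark; a hypothetical end-to-end sketch (requires a local Spark session):
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("a", 1), ("b", 2)], schema="text string, label long")
ds = Dataset.from_spark(df)  # materializes the DataFrame through this builder
print(ds[0])  # {'text': 'a', 'label': 1}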
| 83
|
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 261
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_img2img_frog.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 360
|
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """simple docstring"""
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 213
| 0
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    """simple docstring"""
    def __new__(cls, name, bases, attrs):
        """simple docstring"""
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
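A usage sketch of the decorators plus metaclass (the menu class and key choices are hypothetical):
# The metaclass gathers every method carrying a handle_key marker into
# Menu.key_handler, keyed by the values passed to mark()/mark_multiple().
class Menu(metaclass=KeyHandler):
    @mark(ord("q"))
    def quit(cls):
        return "quit"

    @mark_multiple(ord("j"), ord("k"))
    def move(cls):
        return "move"

assert Menu.key_handler[ord("q")] is Menu.quit
# Menu.handle_input (installed by the metaclass) reads one key with
# get_character() and dispatches to the matching key_handler entry.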
| 55
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = dqa_pipeline(UpperCamelCase , top_k=2 )
self.assertEqual(
UpperCamelCase , [
[
{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase ), "start": ANY(UpperCamelCase ), "end": ANY(UpperCamelCase )},
{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase ), "start": ANY(UpperCamelCase ), "end": ANY(UpperCamelCase )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "How many cats are there?"
lowerCamelCase_ = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , UpperCamelCase )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , UpperCamelCase )
        # No text can be detected in this image, so layoutlmv2 should fail
        # and is expected to return an empty answer.
lowerCamelCase_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , words=UpperCamelCase , boxes=UpperCamelCase , top_k=2 )
self.assertEqual(UpperCamelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
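# A minimal, commented usage sketch of the pipeline exercised above. The
# checkpoint and question come from the tests; the output fields mirror the
# assertions rather than a guaranteed schema:
#
# dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
# # -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}, ...]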
| 55
| 1
|
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __snake_case ( self , A_ , A_ , A_ ) -> str:
lowerCAmelCase = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowerCAmelCase = VideoClassificationPipeline(model=A_ , image_processor=A_ , top_k=2 )
lowerCAmelCase = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def __snake_case ( self , A_ , A_ ) -> Union[str, Any]:
for example in examples:
lowerCAmelCase = video_classifier(A_ )
self.assertEqual(
A_ , [
{"""score""": ANY(A_ ), """label""": ANY(A_ )},
{"""score""": ANY(A_ ), """label""": ANY(A_ )},
] , )
@require_torch
def __snake_case ( self ) -> int:
lowerCAmelCase = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowerCAmelCase = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowerCAmelCase = pipeline(
"""video-classification""" , model=A_ , feature_extractor=A_ , frame_sampling_rate=4 )
lowerCAmelCase = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowerCAmelCase = video_classifier(A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}] , )
lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
] , )
@require_tf
def __snake_case ( self ) -> str:
pass
| 362
|
'''simple docstring'''
class OverFlowError( Exception ):
    '''simple docstring'''
    pass
class UnderFlowError( Exception ):
    '''simple docstring'''
    pass
class __snake_case:
'''simple docstring'''
def __init__( self ) -> int:
lowerCAmelCase = [
[],
[],
[],
]
def __snake_case ( self , A_ , A_ ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(A_ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __snake_case ( self ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ) -> str:
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class __snake_case:
'''simple docstring'''
def __init__( self ) -> Dict:
lowerCAmelCase = []
def __snake_case ( self , A_ ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(A_ )
def __snake_case ( self ) -> int:
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
lowerCAmelCase = min(self.queue )
self.queue.remove(A_ )
return data
def __str__( self ) -> str:
return str(self.queue )
def _snake_case ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
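    # Hand-worked dequeue order for the demos above (priority 0 first, FIFO
    # within a priority): the fixed-priority queue yields 10, 100, 128, 70, 7,
    # then 64, 1, 5, 4, and the tenth dequeue raises UnderFlowError because
    # only nine items were enqueued; the element queue always yields its
    # minimum, i.e. 1, 4, 5, 7, 10, then 64, 70, 100, 128 before raising.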
| 187
| 0
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
_lowerCAmelCase = logging.get_logger()
# the current default level is logging.WARNING
_lowerCAmelCase = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__snake_case )
def lowercase__ ( self : int ) -> str:
_lowerCAmelCase = logging.get_verbosity()
_lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
_lowerCAmelCase = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(__snake_case )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowercase__ ( self : Tuple ) -> Tuple:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
_lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , __snake_case )
_lowerCAmelCase = logging.log_levels[env_level_str]
_lowerCAmelCase = logging.get_verbosity()
self.assertEqual(
__snake_case , __snake_case , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
_lowerCAmelCase = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowercase__ ( self : Optional[int] ) -> Any:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_lowerCAmelCase = logging.logging.getLogger()
with CaptureLogger(__snake_case ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowercase__ ( self : Dict ) -> Any:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
_lowerCAmelCase = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(__snake_case ) as cl:
logger.warning_advice(__snake_case )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__snake_case ) as cl:
logger.warning_advice(__snake_case )
self.assertEqual(cl.out , msg + """\n""" )
def UpperCamelCase__ ( ):
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
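# A commented sketch of the environment variables exercised above (shell
# syntax; the script name is hypothetical):
#
#   TRANSFORMERS_VERBOSITY=error python run.py           # only errors are logged
#   TRANSFORMERS_NO_ADVISORY_WARNINGS=1 python run.py    # silences logger.warning_advice()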
| 70
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size
| 86
| 0
|
'''simple docstring'''
from __future__ import annotations
class __snake_case:
'''simple docstring'''
def __init__( self , A_ = 0 ) -> Dict:
lowerCAmelCase = key
def __snake_case ( self , A_ , A_ ) -> list[str]:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
lowerCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(A_ ) ^ key ) for ch in content]
def __snake_case ( self , A_ , A_ ) -> list[str]:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
lowerCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(A_ ) ^ key ) for ch in content]
def __snake_case ( self , A_ , A_ = 0 ) -> str:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
lowerCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowerCAmelCase = """"""
for ch in content:
ans += chr(ord(A_ ) ^ key )
return ans
def __snake_case ( self , A_ , A_ = 0 ) -> str:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
lowerCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowerCAmelCase = """"""
for ch in content:
ans += chr(ord(A_ ) ^ key )
return ans
def __snake_case ( self , A_ , A_ = 0 ) -> bool:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
try:
with open(A_ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(A_ , A_ ) )
except OSError:
return False
return True
def __snake_case ( self , A_ , A_ ) -> bool:
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
try:
with open(A_ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(A_ , A_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 187
|
'''simple docstring'''
from __future__ import annotations
def _snake_case ( _SCREAMING_SNAKE_CASE : int | str ) -> bool:
"""simple docstring"""
lowerCAmelCase = str(_SCREAMING_SNAKE_CASE )
return n == n[::-1]
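# Hand-worked example: 585 reads the same forwards and backwards in base 10,
# and bin(585) == "0b1001001001", whose digit part is also a palindrome, so
# 585 contributes to the total in solution().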
def _snake_case ( _SCREAMING_SNAKE_CASE : int = 1_000_000 ) -> Dict:
"""simple docstring"""
lowerCAmelCase = 0
for i in range(1 , _SCREAMING_SNAKE_CASE ):
if is_palindrome(_SCREAMING_SNAKE_CASE ) and is_palindrome(bin(_SCREAMING_SNAKE_CASE ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 187
| 1
|
from math import sqrt
def A ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
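# Hand-worked example of the 6k +/- 1 loop: for number = 29, sqrt(29) ~ 5.39,
# so the loop is range(5, 6, 6) and tests only i = 5; since 29 % 5 != 0 and
# 29 % 7 != 0, is_prime(29) returns True.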
def A ( _UpperCAmelCase : int = 10_001 ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(_UpperCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(_UpperCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339
|
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = credit_card_number
_UpperCAmelCase = 0
_UpperCAmelCase = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
_UpperCAmelCase = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
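# Hand-worked Luhn check for 4111111111111111 (validated in the __main__ block
# below): doubling every second digit from the right turns the leading 4 into 8
# and seven of the 1s into 2s (8 + 7 * 2 = 22); the eight untouched 1s add 8,
# so the total is 30 and 30 % 10 == 0 -> valid.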
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_UpperCAmelCase ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339
| 1
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'efficientformer'
    def __init__( self : List[Any] , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.0_2 , layer_norm_eps : float = 1e-12 , image_size : int = 224 , batch_norm_eps : float = 1e-05 , **kwargs : Tuple , ):
        '''simple docstring'''
        super().__init__(**kwargs )
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = hidden_sizes
UpperCAmelCase__ : str = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : str = layer_norm_eps
UpperCAmelCase__ : Union[str, Any] = patch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : int = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Tuple = downsamples
UpperCAmelCase__ : Dict = dim
UpperCAmelCase__ : Any = key_dim
UpperCAmelCase__ : Optional[int] = attention_ratio
UpperCAmelCase__ : Tuple = resolution
UpperCAmelCase__ : Union[str, Any] = pool_size
UpperCAmelCase__ : Optional[int] = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : str = drop_path_rate
        UpperCAmelCase__ : List[str] = num_meta3d_blocks
UpperCAmelCase__ : Optional[Any] = distillation
UpperCAmelCase__ : Any = use_layer_scale
UpperCAmelCase__ : Any = layer_scale_init_value
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : int = batch_norm_eps
| 299
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
    def __init__( self : List[str] , parent : List[Any] , batch_size : Union[str, Any]=7 , num_channels : List[str]=3 , min_resolution : str=30 , max_resolution : Tuple=400 , do_resize : Optional[int]=True , size : List[str]=None , do_normalize : int=True , image_mean : int=[0.5, 0.5, 0.5] , image_std : Optional[int]=[0.5, 0.5, 0.5] , do_rescale : List[Any]=True , rescale_factor : str=1 / 255 , do_pad : Tuple=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : List[Any] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : Tuple = do_resize
UpperCAmelCase__ : Union[str, Any] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : Union[str, Any] = image_mean
UpperCAmelCase__ : Optional[int] = image_std
UpperCAmelCase__ : Dict = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : int = do_pad
def lowercase_ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase__ : Optional[int] = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : str = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase__ : int = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase__ : List[str] = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = self.size['''shortest_edge''']
else:
UpperCAmelCase__ : int = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
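    # Hand-worked example of the resize rule above (hypothetical input): with
    # size["shortest_edge"] == 18, a 30 x 40 (w x h) image has w < h, so the
    # expected output is width 18 and height int(18 * 40 / 30) == 24.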
@require_torch
@require_vision
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 299
| 1
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a ( __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :str = tokenizer(example['''content'''] , truncation=__a )['''input_ids''']
UpperCamelCase__ :str = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
__snake_case = HfArgumentParser(PretokenizationArguments)
__snake_case = parser.parse_args()
if args.num_workers is None:
__snake_case = multiprocessing.cpu_count()
__snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case = time.time()
__snake_case = load_dataset(args.dataset_name, split='''train''')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
__snake_case = time.time()
__snake_case = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 97
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content
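# A minimal, commented round-trip sketch of the writer/reader pair under test
# (the path and column values are hypothetical):
#
# ds = Dataset.from_dict({"col_1": ["a", "b"]})
# JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
# reloaded = JsonDatasetReader("out.jsonl").read()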
| 97
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase__ = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf')
__lowercase =unmasker('My name is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 3_8_0_1_5, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 2_5_5_0_6, 'token_str': ' accuser'},
] , )
__lowercase =unmasker('The largest city in France is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 3_8_0_1_5,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 2_5_5_0_6,
'token_str': ' accuser',
},
] , )
__lowercase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3)
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt')
__lowercase =unmasker('My name is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 3_5_6_7_6, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
__lowercase =unmasker('The largest city in France is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
__lowercase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3)
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
] , )
__lowercase =unmasker('My name is <mask> <mask>' , top_k=2)
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6) , [
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt')
# convert model to fp16
pipe.model.half()
__lowercase =pipe('Paris is the [MASK] of France.')
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
        # for postprocessing.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
@slow
@require_torch
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt')
self.run_large_test(_lowerCAmelCase)
@slow
@require_tf
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf')
self.run_large_test(_lowerCAmelCase)
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =unmasker('My name is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 6_1_0, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1_5_7_3, 'token_str': ' Chris'},
] , )
__lowercase =unmasker('The largest city in France is <mask>')
self.assertEqual(
nested_simplify(_lowerCAmelCase) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2_2_0_1,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_2_7_9_0,
'token_str': ' Lyon',
},
] , )
__lowercase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3)
self.assertEqual(
nested_simplify(_lowerCAmelCase) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt')
__lowercase =None
__lowercase =None
self.run_pipeline_test(_lowerCAmelCase , [])
@require_tf
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf')
__lowercase =None
__lowercase =None
self.run_pipeline_test(_lowerCAmelCase , [])
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token (probably reformer or wav2vec2)')
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
__lowercase =[
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase =fill_masker.tokenizer
__lowercase =fill_masker.model
__lowercase =fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
__lowercase =fill_masker([f"""This is a {tokenizer.mask_token}"""])
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
__lowercase =fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""])
self.assertEqual(
_lowerCAmelCase , [
[
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
],
[
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
],
] , )
with self.assertRaises(_lowerCAmelCase):
fill_masker([None])
        # Inputs without a mask token are not supported
with self.assertRaises(_lowerCAmelCase):
fill_masker('This is')
self.run_test_top_k(_lowerCAmelCase , _lowerCAmelCase)
self.run_test_targets(_lowerCAmelCase , _lowerCAmelCase)
self.run_test_top_k_targets(_lowerCAmelCase , _lowerCAmelCase)
self.fill_mask_with_duplicate_targets_and_top_k(_lowerCAmelCase , _lowerCAmelCase)
self.fill_mask_with_multiple_masks(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =tokenizer.get_vocab()
__lowercase =sorted(vocab.keys())[:2]
# Pipeline argument
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , targets=_lowerCAmelCase)
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""")
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
__lowercase ={vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _lowerCAmelCase)
__lowercase =[tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_lowerCAmelCase))
# Call argument
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_lowerCAmelCase)
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
__lowercase ={vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _lowerCAmelCase)
__lowercase =[tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_lowerCAmelCase))
# Score equivalence
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_lowerCAmelCase)
__lowercase =[top_mask['token_str'] for top_mask in outputs]
__lowercase =[top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase) == set(_lowerCAmelCase):
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_lowerCAmelCase)
__lowercase =[top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowerCAmelCase) , nested_simplify(_lowerCAmelCase))
# Raises with invalid
with self.assertRaises(_lowerCAmelCase):
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowerCAmelCase):
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''])
with self.assertRaises(_lowerCAmelCase):
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='')
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , top_k=2)
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""")
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2)
self.assertEqual(
_lowerCAmelCase , [
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
] , )
self.assertEqual(nested_simplify(_lowerCAmelCase) , nested_simplify(_lowerCAmelCase))
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any):
'''simple docstring'''
__lowercase =tokenizer.get_vocab()
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
# top_k=2, ntargets=3
__lowercase =sorted(vocab.keys())[:3]
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_lowerCAmelCase)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        __lowercase =[el['token_str'] for el in sorted(_lowerCAmelCase , key=lambda x: x["score"] , reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase).issubset(_lowerCAmelCase):
__lowercase =fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_lowerCAmelCase)
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowerCAmelCase) , nested_simplify(_lowerCAmelCase))
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
__lowercase =tokenizer.get_vocab()
# String duplicates + id duplicates
__lowercase =sorted(vocab.keys())[:3]
__lowercase =[targets[0], targets[1], targets[0], targets[2], targets[1]]
__lowercase =fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_lowerCAmelCase , top_k=1_0)
        # The target list contains duplicates, so we can't output more
        # predictions than the number of unique targets
self.assertEqual(len(_lowerCAmelCase) , 3)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
__lowercase =fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2)
self.assertEqual(
_lowerCAmelCase , [
[
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
],
[
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
],
[
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
{'sequence': ANY(_lowerCAmelCase), 'score': ANY(_lowerCAmelCase), 'token': ANY(_lowerCAmelCase), 'token_str': ANY(_lowerCAmelCase)},
],
] , )
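
# --- Added illustration (not part of the original test module) ---
# A minimal, hedged sketch of the fill-mask pipeline exercised above. It
# assumes the same tiny checkpoint the tests use and a working `transformers`
# install; real scores will differ from the fixture values hard-coded above.
if __name__ == "__main__":
    demo_unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    for prediction in demo_unmasker("My name is <mask>"):
        # each prediction is a dict with "sequence", "score", "token", "token_str"
        print(prediction["token_str"], prediction["score"])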
| 354
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert an XLNet TensorFlow checkpoint into a PyTorch model and save it."""
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
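
# --- Added illustration (not part of the original script) ---
# A hedged example invocation, assuming this file is saved as
# `convert_xlnet.py`; every path below is a placeholder you must replace.
#
#   python convert_xlnet.py \
#       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#       --xlnet_config_file /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path /path/to/output \
#       --finetuning_task sts-b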
| 48
| 0
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Point the Unigram model's `unk_id` at the registered `<unk>` token."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
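
# --- Added illustration (not part of the original module) ---
# A hedged usage sketch: trains the tokenizer on a tiny in-memory corpus. The
# sentences and the `vocab_size` value are invented for illustration only.
if __name__ == "__main__":
    demo_corpus = ["hello world", "hello tokenizers", "unigram models are neat"]
    demo_tokenizer = SentencePieceUnigramTokenizer()
    demo_tokenizer.train_from_iterator(iter(demo_corpus), vocab_size=60, show_progress=False)
    print(demo_tokenizer.encode("hello unigram").tokens)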
| 14
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
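
# --- Added illustration (not part of the original module) ---
# A hedged sketch of how these two configs compose in the released
# `transformers` API; the class names come from the public import path, and
# the overrides below are arbitrary examples, not recommendations.
if __name__ == "__main__":
    from transformers import GitConfig, GitVisionConfig

    demo_vision = GitVisionConfig(image_size=224, patch_size=16)
    demo_config = GitConfig(vision_config=demo_vision.to_dict(), num_hidden_layers=6)
    print(demo_config.vision_config.image_size)  # -> 224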
| 14
| 1
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : int = logging.get_logger()
# the current default level is logging.WARNING
UpperCAmelCase_ : Optional[int] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = logging.get_verbosity()
UpperCAmelCase_ : Tuple = logging.get_logger('transformers.models.bart.tokenization_bart' )
UpperCAmelCase_ : List[Any] = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def _UpperCamelCase ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCAmelCase_ : Any = logging.get_logger('transformers.models.bart.tokenization_bart' )
UpperCAmelCase_ : List[Any] = os.getenv('TRANSFORMERS_VERBOSITY' , snake_case_ )
UpperCAmelCase_ : int = logging.log_levels[env_level_str]
UpperCAmelCase_ : int = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCAmelCase_ : List[str] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def _UpperCamelCase ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ : Union[str, Any] = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def _UpperCamelCase ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ : List[Any] = logging.get_logger('transformers.models.bart.tokenization_bart' )
UpperCAmelCase_ : List[Any] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + '\n' )
def _lowerCamelCase ( ):
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
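
# --- Added illustration (not part of the original test module) ---
# A hedged sketch of the verbosity API covered by the tests above, using the
# `logging` module already imported at the top of this file.
if __name__ == "__main__":
    logging.set_verbosity_info()
    demo_logger = logging.get_logger("transformers")
    demo_logger.info("visible at INFO verbosity")
    logging.set_verbosity_error()
    demo_logger.warning("suppressed once verbosity is ERROR")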
| 274
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Numerical Methods (F-PNDM) scheduler."""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the Runge-Kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps, device=None):
        """Sets the discrete timesteps used for the diffusion chain (run before inference)."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(self, model_output, timestep, sample, return_dict=True):
        """Propagate the sample one step with the fourth-order Adams-Bashforth multistep update."""
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample, *args, **kwargs):
        """This scheduler does not scale model inputs; `sample` is returned unchanged."""
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Compute the previous sample from the current sample and the combined noise estimate."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha , 1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
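
# --- Added illustration (not part of the original module) ---
# A hedged denoising-loop sketch for the scheduler above; the "model output"
# is a zero-tensor stand-in, not a real diffusion network, and the sample
# shape is arbitrary.
if __name__ == "__main__":
    demo_scheduler = IPNDMScheduler(num_train_timesteps=1_000)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        demo_model_output = torch.zeros_like(demo_sample)  # stand-in for a denoiser
        demo_sample = demo_scheduler.step(demo_model_output, t, demo_sample).prev_sample
    print(demo_sample.shape)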
| 274
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a = logging.get_logger(__name__)
_a = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCamelCase ( snake_case__ , snake_case__):
"""simple docstring"""
UpperCamelCase__ = "nat"
UpperCamelCase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=64 , UpperCAmelCase=[3, 4, 6, 5] , UpperCAmelCase=[2, 4, 8, 16] , UpperCAmelCase=7 , UpperCAmelCase=3.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(UpperCAmelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = kernel_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase ) - 1) )
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names )
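
# --- Added illustration (not part of the original module) ---
# A hedged instantiation sketch via the public `transformers` import path,
# assuming a version that ships the Nat model (the local class definition
# above keeps its obfuscated name); the `out_features` values are arbitrary.
if __name__ == "__main__":
    from transformers import NatConfig

    demo_config = NatConfig(depths=[3, 4, 6, 5], out_features=["stage2", "stage4"])
    print(demo_config.stage_names)
    print(demo_config.hidden_size)  # embed_dim * 2 ** (number of stages - 1)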
| 39
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 99
| 0
|
from math import factorial
def solution(n: int = 100) -> int:
    """Return the sum of the digits of n!."""
    return sum(map(int, str(factorial(n))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
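
# --- Added illustration (not part of the original snippet) ---
# Worked example: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27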
| 216
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # finds the rightmost occurrence of `char` in the pattern, or -1
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # finds the rightmost mismatching position in the current text window, or -1
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 216
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Union[str, Any] ="camembert"
def __init__( self : int , a : Dict=3_05_22 , a : List[Any]=7_68 , a : int=12 , a : Union[str, Any]=12 , a : int=30_72 , a : Any="gelu" , a : Optional[Any]=0.1 , a : Any=0.1 , a : List[Any]=5_12 , a : List[str]=2 , a : Union[str, Any]=0.02 , a : List[Any]=1e-1_2 , a : Any=1 , a : List[str]=0 , a : Optional[int]=2 , a : Union[str, Any]="absolute" , a : Optional[int]=True , a : List[str]=None , **a : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class a__ ( UpperCAmelCase__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 67
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __A :
"""simple docstring"""
UpperCamelCase__ : int =XGLMConfig
UpperCamelCase__ : Optional[Any] ={}
UpperCamelCase__ : List[str] ="""gelu"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ):
"""simple docstring"""
__UpperCamelCase : Tuple =parent
__UpperCamelCase : List[str] =batch_size
__UpperCamelCase : str =seq_length
__UpperCamelCase : Dict =is_training
__UpperCamelCase : Tuple =use_input_mask
__UpperCamelCase : List[Any] =use_labels
__UpperCamelCase : Any =vocab_size
__UpperCamelCase : List[Any] =d_model
__UpperCamelCase : Optional[int] =num_hidden_layers
__UpperCamelCase : List[str] =num_attention_heads
__UpperCamelCase : Optional[int] =ffn_dim
__UpperCamelCase : str =activation_function
__UpperCamelCase : Any =activation_dropout
__UpperCamelCase : Optional[int] =attention_dropout
__UpperCamelCase : Optional[int] =max_position_embeddings
__UpperCamelCase : Any =initializer_range
__UpperCamelCase : Dict =None
__UpperCamelCase : Optional[int] =0
__UpperCamelCase : Optional[Any] =2
__UpperCamelCase : str =1
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__UpperCamelCase : Union[str, Any] =None
if self.use_input_mask:
__UpperCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Any =self.get_config()
__UpperCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase__ , )
def __lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
return config, inputs_dict
@require_tf
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : str =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Optional[Any] =(
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Optional[Any] =False
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMModelTester(self )
__UpperCamelCase : Dict =ConfigTester(self , config_class=lowerCamelCase__ , n_embd=37 )
def __lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[Any] =TFXGLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self , lowerCamelCase__=True ):
"""simple docstring"""
__UpperCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        __UpperCamelCase : List[str] =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase : str =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase : Optional[Any] =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Union[str, Any] =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase : str =tokenizer('Today is a nice day and' , return_tensors='tf' )
__UpperCamelCase : Union[str, Any] =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase : Any =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , seed=[7, 0] )
__UpperCamelCase : Tuple =tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : List[Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
__UpperCamelCase : Optional[int] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase : List[Any] =tokenizer(lowerCamelCase__ , return_tensors='tf' , padding=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =inputs['input_ids']
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__UpperCamelCase : List[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Any =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__UpperCamelCase : Optional[Any] =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Optional[int] =tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Any =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence] )
| 71
| 0
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
A__ : Union[str, Any] = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 0
|
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0
| 1
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: float, b: float, c: float) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(F"""The solutions are: {solution_1} and {solution_2}""" )
if __name__ == "__main__":
main()
| 6
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : str = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
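
# --- Added illustration (not part of the original test module) ---
# A hedged direct call to the helper under test; the filename list is made up
# and pairs a .bin weight with its .safetensors counterpart.
if __name__ == "__main__":
    demo_filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    print(is_safetensors_compatible(demo_filenames))  # expected: True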
| 91
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__: Tuple = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Dict = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: int = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__magic_name__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in value_array, find the closest vector in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )

    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
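    # A minimal usage sketch (sample data assumed, not part of the original file):
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    sample_query = np.array([[0.9, 1.0]])
    print(similarity_search(sample_dataset, sample_query))  # nearest vector is [1.0, 1.0], distance ~0.1
    print(cosine_similarity(sample_dataset[1], sample_query[0]))  # ~0.999 for nearly aligned vectors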
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        """Stub so this module still imports when the vision extras are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation
        # and the order is not guaranteed across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Scores tie, so only the label type is stable across runs.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
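# The @slow cases above only run when the standard transformers env flag is
# set (test file path assumed):
#     RUN_SLOW=1 python -m pytest tests/pipelines/test_pipelines_zero_shot_image_classification.py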
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
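# Because the module replaces itself in sys.modules, an attribute lookup such
# as `transformers.models.wavlm.WavLMModel` is what finally imports
# modeling_wavlm (and torch); merely importing the package stays cheap.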
"""simple docstring"""
def A ( snake_case :Tuple = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> Optional[int]:
try:
__UpperCamelCase = int(snake_case )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
__UpperCamelCase = 1
__UpperCamelCase = 2
while i * i <= n:
while n % i == 0:
__UpperCamelCase = i
n //= i
i += 1
if n > 1:
__UpperCamelCase = n
return int(snake_case )
if __name__ == "__main__":
    print(f"{solution() = }")
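    # Quick sanity check (illustrative, mirroring the Project Euler statement):
    # 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13195) == 29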
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( snake_case :str , snake_case :str = "cpu" , snake_case :Union[str, None] = None ) -> None:
__UpperCamelCase = torch.load(snake_case , map_location=snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(snake_case , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
__UpperCamelCase = v.half()
if save_path is None: # overwrite src_path
__UpperCamelCase = src_path
torch.save(snake_case , snake_case )
if __name__ == "__main__":
fire.Fire(convert)
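# Example invocation via python-fire (script name and paths are placeholders):
#     python fp16_convert.py pytorch_model.bin --map_location cpu --save_path pytorch_model.fp16.bin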