Dataset columns:

  code                      string   (length 86 - 54.5k)
  code_codestyle            int64    (0 - 371)
  style_context             string   (length 87 - 49.2k)
  style_context_codestyle   int64    (0 - 349)
  label                     int64    (0 - 1)
"""simple docstring""" from math import factorial def _snake_case ( _snake_case : int = 100 ): return sum(int(_snake_case ) for x in str(factorial(_snake_case ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
[codestyle: 314]
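As a quick sanity check on the factorial digit-sum routine above (a minimal sketch; 648 is the well-known digit sum of 100!):

from math import factorial

assert sum(int(digit) for digit in str(factorial(100))) == 648  # digit sum of 100!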
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are 
decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
[codestyle: 314]
[label: 1]
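Outside a test harness, the iterator streamer above is typically driven like this; a minimal sketch reusing the same tiny test checkpoint, with generate() running on a background thread while the main thread consumes text chunks:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer(["A short prompt"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 10})
thread.start()
generated = "".join(new_text for new_text in streamer)  # yields chunks as they are produced
thread.join()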
"""simple docstring""" def _snake_case ( _snake_case : int ): lowerCAmelCase : Any = 0 lowerCAmelCase : Optional[Any] = len(_snake_case ) for i in range(n - 1 ): for j in range(i + 1 , _snake_case ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _snake_case ( _snake_case : Dict ): if len(_snake_case ) <= 1: return arr, 0 lowerCAmelCase : Union[str, Any] = len(_snake_case ) // 2 lowerCAmelCase : List[Any] = arr[0:mid] lowerCAmelCase : Tuple = arr[mid:] lowerCAmelCase, lowerCAmelCase : List[Any] = count_inversions_recursive(_snake_case ) lowerCAmelCase, lowerCAmelCase : List[Any] = count_inversions_recursive(_snake_case ) lowerCAmelCase, lowerCAmelCase : int = _count_cross_inversions(_snake_case , _snake_case ) lowerCAmelCase : Dict = inversion_p + inversions_q + cross_inversions return c, num_inversions def _snake_case ( _snake_case : Optional[Any] , _snake_case : str ): lowerCAmelCase : Optional[int] = [] lowerCAmelCase : int = 0 while i < len(_snake_case ) and j < len(_snake_case ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_snake_case ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_snake_case ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _snake_case ( ): lowerCAmelCase : int = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) lowerCAmelCase : Optional[Any] = count_inversions_bf(_snake_case ) lowerCAmelCase, lowerCAmelCase : Tuple = count_inversions_recursive(_snake_case ) assert num_inversions_bf == num_inversions_recursive == 8 print('''number of inversions = ''' , _snake_case ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() lowerCAmelCase : List[Any] = count_inversions_bf(_snake_case ) lowerCAmelCase, lowerCAmelCase : Dict = count_inversions_recursive(_snake_case ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , _snake_case ) # an empty list should also have zero inversions lowerCAmelCase : Optional[int] = [] lowerCAmelCase : Any = count_inversions_bf(_snake_case ) lowerCAmelCase, lowerCAmelCase : Any = count_inversions_recursive(_snake_case ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , _snake_case ) if __name__ == "__main__": main()
[codestyle: 314]
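The recursive counter runs in O(n log n) time against the brute-force scan's O(n^2); a small usage sketch of the two functions defined above:

arr = [5, 4, 3, 2, 1]  # fully reversed, so every pair is inverted: C(5, 2) = 10
_sorted_arr, num_recursive = count_inversions_recursive(arr)
assert num_recursive == count_inversions_bf(arr) == 10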
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
[codestyle: 314]
[label: 1]
"""simple docstring""" import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Optional[Any]=1_8 , UpperCamelCase_ : Union[str, Any]=3_0 , UpperCamelCase_ : Tuple=4_0_0 , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=[0.5, 0.5, 0.5] , UpperCamelCase_ : Dict=[0.5, 0.5, 0.5] , UpperCamelCase_ : int=False , ): lowerCAmelCase : List[str] = size if size is not None else {'''height''': 2_0, '''width''': 2_0} lowerCAmelCase : str = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} lowerCAmelCase : Any = parent lowerCAmelCase : List[Any] = batch_size lowerCAmelCase : Dict = num_channels lowerCAmelCase : Optional[Any] = image_size lowerCAmelCase : Tuple = min_resolution lowerCAmelCase : Union[str, Any] = max_resolution lowerCAmelCase : Dict = do_resize lowerCAmelCase : Dict = size lowerCAmelCase : Any = do_center_crop lowerCAmelCase : Tuple = crop_size lowerCAmelCase : Union[str, Any] = do_normalize lowerCAmelCase : Optional[int] = image_mean lowerCAmelCase : Union[str, Any] = image_std lowerCAmelCase : int = do_reduce_labels def lowerCamelCase__ ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _snake_case ( ): lowerCAmelCase : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) lowerCAmelCase : List[str] = Image.open(dataset[0]['''file'''] ) lowerCAmelCase : List[str] = Image.open(dataset[1]['''file'''] ) return image, map def _snake_case ( ): lowerCAmelCase : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) lowerCAmelCase : List[str] = Image.open(ds[0]['''file'''] ) lowerCAmelCase : Union[str, Any] = Image.open(ds[1]['''file'''] ) lowerCAmelCase : Optional[Any] = Image.open(ds[2]['''file'''] ) lowerCAmelCase : Any = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = BeitImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = BeitImageProcessingTester(self ) @property def lowerCamelCase__ ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) 
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 2_0, '''width''': 2_0} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=UpperCamelCase_ ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): pass def lowerCamelCase__ ( self : int ): # Initialize image_processing lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase__ ( self : Tuple ): # Initialize image_processing lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : Any = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase__ ( self : Optional[int] ): # Initialize image_processing lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input lowerCAmelCase : int = 
image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase__ ( self : Dict ): # Initialize image_processing lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [] for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input lowerCAmelCase : Any = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test batched lowerCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test not batched input (PIL images) lowerCAmelCase, lowerCAmelCase : Any = prepare_semantic_single_inputs() lowerCAmelCase : str = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test batched input (PIL images) lowerCAmelCase, lowerCAmelCase : List[Any] = prepare_semantic_batch_inputs() lowerCAmelCase : Tuple = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' ) 
self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) def lowerCamelCase__ ( self : Optional[Any] ): # Initialize image_processing lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 lowerCAmelCase, lowerCAmelCase : Union[str, Any] = prepare_semantic_single_inputs() lowerCAmelCase : List[Any] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_5_0 ) lowerCAmelCase : int = True lowerCAmelCase : str = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
[codestyle: 314]
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ): lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0] lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ ) lowerCAmelCase : Any = nsfw_detected.flatten() lowerCAmelCase : Dict = nsfw_detected > p_threshold lowerCAmelCase : int = nsfw_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ): if nsfw_detected_: lowerCAmelCase : List[Any] = np.zeros(images[idx].shape ) lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = watermark_detected.flatten() lowerCAmelCase : Optional[int] = watermark_detected > w_threshold lowerCAmelCase : Union[str, Any] = watermark_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(UpperCamelCase_ ): if watermark_detected_: lowerCAmelCase : List[str] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
[codestyle: 314]
[label: 1]
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : int = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Union[str, Any] = ['''ConditionalDetrFeatureExtractor'''] snake_case__ : Union[str, Any] = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
[codestyle: 314]
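The _LazyModule indirection defers the heavy submodule imports until first attribute access; a hedged sketch of the observable effect, assuming the standard transformers lazy-init behavior:

import transformers

config = transformers.ConditionalDetrConfig()  # light: only the configuration module loads
model_cls = transformers.ConditionalDetrModel  # this access triggers the torch-backed modeling import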
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
[codestyle: 314]
[label: 1]
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : Tuple = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } snake_case__ : Any = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : str , _snake_case : str , _snake_case : Dict ): for attribute in key.split('''.''' ): lowerCAmelCase : Tuple = getattr(_snake_case , _snake_case ) if weight_type is not None: lowerCAmelCase : Optional[Any] = getattr(_snake_case , _snake_case ).shape else: lowerCAmelCase : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCAmelCase : str = value elif weight_type == "weight_g": lowerCAmelCase : List[Any] = value elif weight_type == "weight_v": lowerCAmelCase : List[Any] = value elif weight_type == "bias": lowerCAmelCase : List[str] = value elif weight_type == "running_mean": lowerCAmelCase : List[str] = value elif weight_type == "running_var": lowerCAmelCase : Optional[int] = value elif weight_type == "num_batches_tracked": lowerCAmelCase : List[Any] = value elif weight_type == "inv_freq": lowerCAmelCase : Optional[Any] = value else: lowerCAmelCase : int = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _snake_case ( _snake_case : Any , _snake_case : List[str] , _snake_case : Tuple ): lowerCAmelCase : str = [] lowerCAmelCase : Optional[Any] = fairseq_model.state_dict() lowerCAmelCase : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase : Tuple = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) lowerCAmelCase : Any = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase : Tuple = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowerCAmelCase : Union[str, Any] = True if "*" in mapped_key: lowerCAmelCase : Optional[int] = name.split(_snake_case )[0].split('''.''' )[-2] lowerCAmelCase : Dict = mapped_key.replace('''*''' , _snake_case ) if "pos_bias_u" in name: lowerCAmelCase : Any = None elif "pos_bias_v" in name: lowerCAmelCase : List[str] = None elif "weight_g" in name: lowerCAmelCase : Optional[int] = '''weight_g''' elif "weight_v" in name: lowerCAmelCase : Optional[int] = '''weight_v''' elif "bias" in name: lowerCAmelCase : Tuple = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase : Union[str, Any] = '''weight''' elif "running_mean" in name: lowerCAmelCase : List[str] = '''running_mean''' elif "inv_freq" in name: lowerCAmelCase : Tuple = '''inv_freq''' elif "running_var" in name: lowerCAmelCase : List[Any] = '''running_var''' elif "num_batches_tracked" in name: lowerCAmelCase : int = '''num_batches_tracked''' else: lowerCAmelCase : List[Any] = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : int , _snake_case : Any , _snake_case : str , _snake_case : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] lowerCAmelCase : Optional[Any] = name.split('''.''' ) lowerCAmelCase : Dict = int(items[0] ) lowerCAmelCase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCAmelCase : List[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != 
feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCAmelCase : List[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowerCAmelCase : Dict = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCAmelCase : Optional[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) @torch.no_grad() def _snake_case ( _snake_case : Any , _snake_case : str , _snake_case : Any=None , _snake_case : str=None , _snake_case : Any=True ): if config_path is not None: lowerCAmelCase : Optional[int] = WavaVecaConformerConfig.from_pretrained(_snake_case , hidden_act='''swish''' ) else: lowerCAmelCase : int = WavaVecaConformerConfig() if "rope" in checkpoint_path: lowerCAmelCase : Optional[int] = '''rotary''' if is_finetuned: if dict_path: lowerCAmelCase : List[Any] = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase : Any = target_dict.pad_index lowerCAmelCase : Tuple = target_dict.bos_index lowerCAmelCase : int = target_dict.eos_index lowerCAmelCase : Any = len(target_dict.symbols ) lowerCAmelCase : int = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) lowerCAmelCase : int = target_dict.indices # fairseq has the <pad> and <s> switched lowerCAmelCase : Union[str, Any] = 0 lowerCAmelCase : List[str] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) lowerCAmelCase : Dict = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) lowerCAmelCase : int = True if config.feat_extract_norm == '''layer''' else False lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) lowerCAmelCase : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) lowerCAmelCase : Union[str, Any] = WavaVecaConformerForCTC(_snake_case ) else: lowerCAmelCase : List[Any] = WavaVecaConformerForPreTraining(_snake_case ) if is_finetuned: lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( 
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowerCAmelCase : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) lowerCAmelCase : List[Any] = fairseq.tasks.setup_task(_snake_case ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) lowerCAmelCase : Union[str, Any] = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) snake_case__ : List[Any] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
[codestyle: 314]
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
[codestyle: 314]
[label: 1]
"""simple docstring""" def _snake_case ( _snake_case : int ): return str(_snake_case ) == str(_snake_case )[::-1] def _snake_case ( _snake_case : int ): return int(_snake_case ) + int(str(_snake_case )[::-1] ) def _snake_case ( _snake_case : int = 10000 ): lowerCAmelCase : Optional[int] = [] for num in range(1 , _snake_case ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Any = num while iterations < 50: lowerCAmelCase : Tuple = sum_reverse(_snake_case ) iterations += 1 if is_palindrome(_snake_case ): break else: lychrel_nums.append(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
[codestyle: 314]
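A few hedged spot checks of the helpers above (249 is the published Project Euler 55 answer for the default limit of 10,000):

assert is_palindrome(7337) and not is_palindrome(196)
assert sum_reverse(349) == 349 + 943  # first reverse-and-add step: 349 -> 1292
assert solution() == 249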
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
[codestyle: 314]
[label: 1]
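The Project Euler 87 statement itself provides a cheap regression check: exactly four numbers below fifty (28, 33, 47 and 49) are expressible as a prime square plus prime cube plus prime fourth power:

assert solution(50) == 4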
"""simple docstring""" import unittest from knapsack import knapsack as k class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : List[str] = [0] lowerCAmelCase : List[Any] = [0] lowerCAmelCase : List[str] = len(UpperCamelCase_ ) self.assertEqual(k.knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 0 ) lowerCAmelCase : Any = [6_0] lowerCAmelCase : List[Any] = [1_0] lowerCAmelCase : List[Any] = len(UpperCamelCase_ ) self.assertEqual(k.knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : List[str] = [1, 2, 3] lowerCAmelCase : Tuple = [3, 2, 1] lowerCAmelCase : Optional[Any] = len(UpperCamelCase_ ) self.assertEqual(k.knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 5 ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = 5_0 lowerCAmelCase : List[str] = [6_0, 1_0_0, 1_2_0] lowerCAmelCase : List[str] = [1_0, 2_0, 3_0] lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) self.assertEqual(k.knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 2_2_0 ) if __name__ == "__main__": unittest.main()
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor'''] snake_case__ : List[Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
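The knapsack tests above call k.knapsack(capacity, weights, values, counter) and expect the classic 0/1 optimum (220 for the textbook capacity-50 case). A standard bottom-up DP consistent with those expected values; this is an illustrative sketch, not the tested module's actual implementation:

def knapsack(capacity: int, weights: list, values: list, n: int) -> int:
    # dp[c] holds the best value achievable with capacity c.
    dp = [0] * (capacity + 1)
    for i in range(n):
        # Iterate capacity downwards so each item is used at most once.
        for c in range(capacity, weights[i] - 1, -1):
            dp[c] = max(dp[c], dp[c - weights[i]] + values[i])
    return dp[capacity]

assert knapsack(3, [1, 2, 3], [3, 2, 1], 3) == 5
assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220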
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
314
1
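The ResizeShortestEdge logic in the style-context column above scales the shorter image edge to the target size, then caps the longer edge at max_size, rounding with int(x + 0.5). The arithmetic in isolation, as a sketch:

def shortest_edge_resize(h: int, w: int, size: int, max_size: int) -> tuple:
    # Scale so the shorter edge equals `size`.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # Rescale again if the longer edge overshoots `max_size`.
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_resize(480, 640, 600, 1000) == (600, 800)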
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = PhobertTokenizer __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[int] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase : List[Any] = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@'''] lowerCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : int = ['''#version: 0.2''', '''l à</w>'''] lowerCAmelCase : Dict = {'''unk_token''': '''<unk>'''} lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] , **UpperCamelCase_ : str ): kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Tuple = '''Tôi là VinAI Research''' lowerCAmelCase : Dict = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>''' return input_text, output_text def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[Any] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase : Any = '''Tôi là VinAI Research''' lowerCAmelCase : Optional[int] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split() lowerCAmelCase : Any = tokenizer.tokenize(UpperCamelCase_ ) print(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token] lowerCAmelCase : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
314
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
314
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
314
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Dict = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : int = 0 lowerCAmelCase : Dict = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Union[str, Any] = 0 # compute the shape of the output matrix lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : int = 0 lowerCAmelCase : Tuple = 0 return updated_arr def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Union[str, Any] = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 # compute the shape of the output matrix lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : str = 0 lowerCAmelCase : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image snake_case__ : Optional[Any] = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
314
1
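As a worked example of the pooling code in the style-context column above: with a 4×4 input, size=2 and stride=2, the output side length is (4 - 2) // 2 + 1 = 2, and each output cell is the maximum over one 2×2 block:

import numpy as np

arr = np.arange(16).reshape(4, 4)
size, stride = 2, 2
out = (arr.shape[0] - size) // stride + 1   # -> 2
maxpooled = np.array(
    [[arr[i : i + size, j : j + size].max() for j in range(0, out * stride, stride)]
     for i in range(0, out * stride, stride)]
)
assert maxpooled.tolist() == [[5, 7], [13, 15]]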
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets snake_case__ : Optional[int] = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' snake_case__ : Any = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' snake_case__ : str = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_( datasets.Metric ): def lowerCamelCase__ ( self : List[str] ): if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[ '''https://github.com/m-popovic/chrF''', ] , ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int = CHRF.CHAR_ORDER , UpperCamelCase_ : int = CHRF.WORD_ORDER , UpperCamelCase_ : int = CHRF.BETA , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , ): lowerCAmelCase : str = len(references[0] ) if any(len(UpperCamelCase_ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(UpperCamelCase_ )] lowerCAmelCase : List[Any] = CHRF(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = sb_chrf.corpus_score(UpperCamelCase_ , UpperCamelCase_ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
314
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
1
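One detail of the chrF wrapper above worth spelling out: datasets expects one reference sub-list per prediction, while sacrebleu wants references grouped by reference index, so the compute method transposes them. In isolation:

# Two predictions, two references each, regrouped by reference index.
references = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
n = len(references[0])
transposed = [[refs[i] for refs in references] for i in range(n)]
assert transposed == [["ref A1", "ref B1"], ["ref A2", "ref B2"]]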
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class snake_case_( a__ ): def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[str]=None , **UpperCamelCase_ : List[Any] ): if tokenize_kwargs is None: lowerCAmelCase : Union[str, Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) lowerCAmelCase : Union[str, Any] = truncation lowerCAmelCase : List[Any] = tokenize_kwargs lowerCAmelCase : Any = {} if return_tensors is not None: lowerCAmelCase : List[str] = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict ): lowerCAmelCase : Optional[int] = self.framework lowerCAmelCase : Tuple = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) return model_inputs def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Any = self.model(**UpperCamelCase_ ) return model_outputs def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : str=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Tuple , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Any ): return super().__call__(*UpperCamelCase_ , **UpperCamelCase_ )
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed snake_case__ : Dict = logging.getLogger(__name__) def _snake_case ( _snake_case : Dict=2 , _snake_case : Optional[int]=3 , _snake_case : Any=16 , _snake_case : int = 10 , _snake_case : int = 2 ): def get_dataset(_snake_case : List[Any] ): lowerCAmelCase : str = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(_snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) lowerCAmelCase : str = get_dataset(_snake_case ) lowerCAmelCase : str = get_dataset(_snake_case ) lowerCAmelCase : Optional[Any] = DataLoader(_snake_case , shuffle=_snake_case , batch_size=_snake_case , num_workers=4 ) lowerCAmelCase : Tuple = DataLoader(_snake_case , shuffle=_snake_case , batch_size=_snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def _snake_case ( _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any]=None ): lowerCAmelCase : Union[str, Any] = [] for epoch in range(_snake_case ): # Train quickly model.train() for batch in dataloader: lowerCAmelCase, lowerCAmelCase : Optional[int] = batch lowerCAmelCase : List[str] = model(_snake_case ) lowerCAmelCase : List[Any] = torch.nn.functional.mse_loss(_snake_case , _snake_case ) accelerator.backward(_snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class snake_case_( nn.Module ): def __init__( self : List[Any] ): super().__init__() lowerCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) ) lowerCAmelCase : Dict = nn.Parameter(torch.randn(1 ) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] ): return x * self.a + self.b class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) lowerCAmelCase : Any = DummyModel() lowerCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase, lowerCAmelCase : str = dummy_dataloaders() lowerCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline lowerCAmelCase : Dict = Accelerator(project_config=UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[Any] = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) lowerCAmelCase : Union[str, Any] = DummyModel() lowerCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase, lowerCAmelCase : Dict = dummy_dataloaders() # Train baseline lowerCAmelCase : Union[str, Any] = Accelerator() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , 
UpperCamelCase_ , UpperCamelCase_ ) # Save initial lowerCAmelCase : Any = os.path.join(UpperCamelCase_ , '''initial''' ) accelerator.save_state(UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : Tuple = model.a.item(), model.b.item() lowerCAmelCase : int = optimizer.state_dict() lowerCAmelCase : Optional[int] = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() lowerCAmelCase : str = optimizer.state_dict() # Train partially set_seed(4_2 ) lowerCAmelCase : Any = DummyModel() lowerCAmelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase, lowerCAmelCase : Dict = dummy_dataloaders() lowerCAmelCase : List[str] = Accelerator() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) accelerator.load_state(UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : Dict = model.a.item(), model.b.item() lowerCAmelCase : List[Any] = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save everything lowerCAmelCase : Any = os.path.join(UpperCamelCase_ , '''checkpoint''' ) accelerator.save_state(UpperCamelCase_ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase_ ) test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : Optional[int] = model.a.item(), model.b.item() lowerCAmelCase : List[Any] = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) lowerCAmelCase : int = DummyModel() lowerCAmelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase, lowerCAmelCase : Dict = dummy_dataloaders() lowerCAmelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline lowerCAmelCase : Optional[int] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[Any] = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() ((lowerCAmelCase), (lowerCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() lowerCAmelCase : List[str] = optimizer.state_dict() lowerCAmelCase : Union[str, Any] = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : int = model.a.item(), model.b.item() lowerCAmelCase : int = optimizer.state_dict() # Train partially set_seed(4_2 ) lowerCAmelCase : Optional[int] = DummyModel() lowerCAmelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase, lowerCAmelCase : Tuple = dummy_dataloaders() lowerCAmelCase : Dict = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ ) lowerCAmelCase : Tuple = Accelerator(project_dir=UpperCamelCase_ , 
project_config=UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) accelerator.load_state(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) ((lowerCAmelCase), (lowerCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() lowerCAmelCase : int = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((lowerCAmelCase), (lowerCAmelCase)) : List[str] = model.a.item(), model.b.item() lowerCAmelCase : List[Any] = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = torch.tensor([1, 2, 3] ) lowerCAmelCase : Tuple = torch.tensor([2, 3, 4] ) lowerCAmelCase : str = DummyModel() lowerCAmelCase : Optional[int] = torch.optim.Adam(net.parameters() ) lowerCAmelCase : List[Any] = Accelerator() with self.assertRaises(UpperCamelCase_ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) lowerCAmelCase : List[Any] = DummyModel() lowerCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase : Union[str, Any] = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 ) lowerCAmelCase, lowerCAmelCase : int = dummy_dataloaders() lowerCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline lowerCAmelCase : Tuple = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() lowerCAmelCase : List[Any] = scheduler.state_dict() train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(UpperCamelCase_ , scheduler.state_dict() ) def lowerCamelCase__ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) lowerCAmelCase : str = DummyModel() lowerCAmelCase : Any = 
ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 ) # Train baseline lowerCAmelCase : Tuple = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) lowerCAmelCase : List[str] = accelerator.prepare(UpperCamelCase_ ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[str] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ : Optional[int] = '''/tmp/accelerate/state_checkpointing''' snake_case__ : Tuple = DummyModel() snake_case__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3) snake_case__ : Tuple = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) snake_case__ , snake_case__ : Dict = dummy_dataloaders() snake_case__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline snake_case__ : str = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) snake_case__ , snake_case__ : List[str] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: snake_case__ : Union[str, Any] = group['''params'''][0].device break assert param_device.type == accelerator.device.type snake_case__ : Any = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''') for group in optimizer.param_groups: snake_case__ : Any = group['''params'''][0].device break assert ( param_device.type == torch.device('''cpu''').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''') for group in optimizer.param_groups: snake_case__ : int = group['''params'''][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''): accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
314
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports snake_case__ : Optional[Any] = ''' import os ''' snake_case__ : Tuple = ''' def foo(): import os return False ''' snake_case__ : Any = ''' def foo(): def bar(): if True: import os return False return bar() ''' snake_case__ : Any = ''' import os try: import bar except ImportError: raise ValueError() ''' snake_case__ : int = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' snake_case__ : Any = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' snake_case__ : List[str] = ''' import os try: import bar except ImportError as e: raise ValueError() ''' snake_case__ : int = ''' import os try: import bar except: raise ValueError() ''' snake_case__ : List[Any] = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' snake_case__ : Optional[int] = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' snake_case__ : Any = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ): lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase : Tuple = get_imports(_snake_case ) assert parsed_imports == ["os"]
314
1
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case_( a__ ): def __init__( self : Any , UpperCamelCase_ : NestedDataStructureLike[PathLike] , UpperCamelCase_ : Optional[NamedSplit] = None , UpperCamelCase_ : Optional[Features] = None , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : str , ): super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : str = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} lowerCAmelCase : Tuple = Text( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCamelCase__ ( self : Dict ): # Build iterable dataset if self.streaming: lowerCAmelCase : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowerCAmelCase : Any = None lowerCAmelCase : Dict = None lowerCAmelCase : List[str] = None lowerCAmelCase : int = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) lowerCAmelCase : Any = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
314
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ): super().__init__() lowerCAmelCase : Dict = initial_learning_rate lowerCAmelCase : List[str] = warmup_steps lowerCAmelCase : Union[str, Any] = power lowerCAmelCase : Dict = decay_schedule_fn lowerCAmelCase : str = name def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa ) lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa ) lowerCAmelCase : str = global_step_float / warmup_steps_float lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , ) def lowerCamelCase__ ( self : str ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ): lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , ) if num_warmup_steps: lowerCAmelCase : List[str] = WarmUp( initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , ) if weight_decay_rate > 0.0: lowerCAmelCase : Dict = AdamWeightDecay( learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , ) else: lowerCAmelCase : Any = tf.keras.optimizers.Adam( learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class snake_case_( a__ ): def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ): super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = weight_decay_rate lowerCAmelCase : List[str] = include_in_weight_decay lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay @classmethod def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp} return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) ) return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowerCAmelCase : Dict = apply_state or {} lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ): lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return 
super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return False return True class snake_case_( a__ ): def __init__( self : Any ): lowerCAmelCase : Any = [] lowerCAmelCase : List[str] = None @property def lowerCamelCase__ ( self : List[str] ): if self._accum_steps is None: lowerCAmelCase : Optional[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCamelCase__ ( self : Any ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ): if not self._gradients: lowerCAmelCase : Any = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(UpperCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(UpperCamelCase_ ) self._accum_steps.assign_add(1 ) def lowerCamelCase__ ( self : Union[str, Any] ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
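# Hedged usage sketch: judging by its signature, the factory above matches
# `transformers.create_optimizer`, which wires the WarmUp schedule and
# AdamWeightDecay together. Hyperparameters below are illustrative only.
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=1_000,
    num_warmup_steps=100,
    weight_decay_rate=0.01,
)
print(float(lr_schedule(50)))     # mid-warmup: roughly half of init_lr
print(float(lr_schedule(1_000)))  # end of schedule, fully decayed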
314
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : int=3 , UpperCamelCase_ : str=1_8 , UpperCamelCase_ : Tuple=3_0 , UpperCamelCase_ : Union[str, Any]=4_0_0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Any=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : Union[str, Any]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , UpperCamelCase_ : Optional[int]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , UpperCamelCase_ : Any=True , ): lowerCAmelCase : Optional[int] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} lowerCAmelCase : List[Any] = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} lowerCAmelCase : Dict = parent lowerCAmelCase : List[Any] = batch_size lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : Tuple = image_size lowerCAmelCase : Union[str, Any] = min_resolution lowerCAmelCase : Union[str, Any] = max_resolution lowerCAmelCase : List[Any] = do_resize lowerCAmelCase : List[Any] = size lowerCAmelCase : Dict = do_center_crop lowerCAmelCase : int = crop_size lowerCAmelCase : int = do_normalize lowerCAmelCase : Optional[int] = image_mean lowerCAmelCase : List[Any] = image_std lowerCAmelCase : Optional[int] = do_convert_rgb def lowerCamelCase__ ( self : Union[str, Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any=False , UpperCamelCase_ : Dict=False , UpperCamelCase_ : int=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCAmelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCAmelCase : Tuple = [] for i in range(self.batch_size ): lowerCAmelCase, lowerCAmelCase : Tuple = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCAmelCase : Any = [torch.from_numpy(UpperCamelCase_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase_ ) @property def lowerCamelCase__ ( self : str ): return 
self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def lowerCamelCase__ ( self : Any ): pass def lowerCamelCase__ ( self : Tuple ): # Initialize image_processing lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase__ ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase__ ( self : List[str] ): # Initialize image_processing lowerCAmelCase : 
Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : str = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = 3 @property def lowerCamelCase__ ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) ) def lowerCamelCase__ ( self : Optional[int] ): pass def lowerCamelCase__ ( self : str ): # Initialize image_processing lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase : List[Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
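# Minimal preprocessing sketch; the Hub checkpoint name is an assumption
# ("OFA-Sys/chinese-clip-vit-base-patch16" is the commonly published one).
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])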
314
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path snake_case__ : Union[str, Any] = '''src/transformers''' # Matches is_xxx_available() snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", snake_case__ : Union[str, Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], snake_case__ : Optional[Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: snake_case__ : Dict = re.compile(R'''^\s*try:''') # Catches a line with else: snake_case__ : int = re.compile(R'''^\s*else:''') def _snake_case ( _snake_case : Optional[Any] ): if _re_test_backend.search(_snake_case ) is None: return None lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : int = f.readlines() lowerCAmelCase : Tuple = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase : List[str] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0] lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase : int = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase : Optional[Any] = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase : Optional[Any] = lines[line_index] lowerCAmelCase : List[Any] = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase : List[str] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase : Any = lines[line_index] lowerCAmelCase : Tuple = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): def find_duplicates(_snake_case : Tuple ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase : Any = [] for key in import_dict_objects.keys(): lowerCAmelCase : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ): lowerCAmelCase : int = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' ) lowerCAmelCase : List[Any] = parse_init(_snake_case ) if objects is not None: lowerCAmelCase : Tuple = analyze_results(*_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) lowerCAmelCase : Any = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules snake_case__ : str = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase : Any = spec.loader.load_module() lowerCAmelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_snake_case ) > 0: lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
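# A simplified re-creation of the backend-detection step used above (the
# obfuscated regex names make the original hard to follow); single-group
# regex variant, for illustration only.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line):
    # Return the backend key for lines like "if not is_torch_available():"
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

print(find_backend("    if not is_torch_available():"))  # torch
print(find_backend("    x = is_torch_available()"))      # None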
314
1
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : int = '''T5Config''' def _snake_case ( _snake_case : jnp.array , _snake_case : int , _snake_case : int ): lowerCAmelCase : List[Any] = jnp.zeros_like(_snake_case ) lowerCAmelCase : Union[str, Any] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowerCAmelCase : Optional[int] = shifted_input_ids.at[:, 0].set(_snake_case ) lowerCAmelCase : Optional[Any] = jnp.where(shifted_input_ids == -100 , _snake_case , _snake_case ) return shifted_input_ids class snake_case_( a__ ): __UpperCamelCase = '''mt5''' __UpperCamelCase = MTaConfig class snake_case_( a__ ): __UpperCamelCase = '''mt5''' __UpperCamelCase = MTaConfig class snake_case_( a__ ): __UpperCamelCase = '''mt5''' __UpperCamelCase = MTaConfig
314
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : List[str] ): lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase : Tuple = emb.weight.data return lin_layer def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ): lowerCAmelCase : Union[str, Any] = {} for old_key in state_dict.keys(): lowerCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCAmelCase : Tuple = state_dict[old_key] return new_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ): lowerCAmelCase : Optional[Any] = [] lowerCAmelCase : Tuple = 0 os.makedirs(_snake_case , exist_ok=_snake_case ) for expert in range(_snake_case ): lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(_snake_case ): lowerCAmelCase : List[str] = torch.load(_snake_case )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Any = os.path.join( _snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) torch.save(_snake_case , _snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_snake_case )[0]].dtype ) # Add the last block lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_snake_case ) == 1: lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) torch.save(_snake_case , _snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_snake_case , _snake_case ) # Otherwise, let's build the index lowerCAmelCase : Dict = {} for idx, shard in enumerate(_snake_case ): lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' ) lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) ) for key in shard: lowerCAmelCase : List[Any] = shard_file # Add the metadata lowerCAmelCase : Dict = {'''total_size''': total_size} lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n''' f.write(_snake_case ) return metadata, index if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ , snake_case__ : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) snake_case__ : str = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
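# Toy illustration of the renaming pass, re-implemented locally in simplified
# form (the function above is authoritative): expert weights are re-homed
# under `ffn.experts.expert_<idx>`, dense fc1/fc2 move under `.ffn.`.
def rename_key(key, expert_idx=None):
    if "moe_layer.experts." in key and expert_idx is not None:
        key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
    elif "experts" not in key:
        key = key.replace(".fc1.", ".ffn.fc1.").replace(".fc2.", ".ffn.fc2.")
    return key

print(rename_key("layers.3.moe_layer.experts.0.fc1.weight", expert_idx=7))
# layers.3.ffn.experts.expert_7.fc1.weight
print(rename_key("layers.3.fc1.weight"))
# layers.3.ffn.fc1.weight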
314
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : Optional[Any] , UpperCamelCase_ : UNetaDModel , UpperCamelCase_ : KarrasVeScheduler ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Tuple , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , **UpperCamelCase_ : List[Any] , ): lowerCAmelCase : List[str] = self.unet.config.sample_size lowerCAmelCase : Any = (batch_size, 3, img_size, img_size) lowerCAmelCase : Optional[Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) lowerCAmelCase : Union[str, Any] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper lowerCAmelCase : Optional[Any] = self.scheduler.schedule[t] lowerCAmelCase : Dict = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat lowerCAmelCase, lowerCAmelCase : List[Any] = self.scheduler.add_noise_to_input(UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. lowerCAmelCase : int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev lowerCAmelCase : int = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. lowerCAmelCase : int = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample lowerCAmelCase : str = self.scheduler.step_correct( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , step_output.prev_sample , step_output['''derivative'''] , ) lowerCAmelCase : int = step_output.prev_sample lowerCAmelCase : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : List[str] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase : int = False break # precondition assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool" return status def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase : Any = 0 # filters actual prime numbers. lowerCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase : Tuple = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_snake_case ): ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase : Dict = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_snake_case ): while quotient != 1: if is_prime(_snake_case ) and (quotient % factor == 0): ans.append(_snake_case ) quotient /= factor else: factor += 1 else: ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : Tuple ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : Optional[Any] = 0 # prime factorization of 'number' lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Any = max(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Dict ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : int = 0 # prime factorization of 'number' lowerCAmelCase : List[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Optional[int] = min(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0 , _snake_case ), "compare bust been from type bool" return number % 2 == 0 def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0 , _snake_case ), "compare bust been from type bool" return number % 2 != 0 def _snake_case ( _snake_case : Tuple ): assert ( isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case ) ), "'number' must been an int, even and > 2" lowerCAmelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case ) lowerCAmelCase : Optional[Any] = len(_snake_case ) # run variable for while-loops. lowerCAmelCase : List[str] = 0 lowerCAmelCase : Tuple = None # exit variable. for break up the loops lowerCAmelCase : str = True while i < len_pn and loop: lowerCAmelCase : str = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase : Dict = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and (len(_snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
lowerCAmelCase : Dict = 0 while numbera != 0: lowerCAmelCase : Union[str, Any] = numbera % numbera lowerCAmelCase : List[Any] = numbera lowerCAmelCase : List[Any] = rest # precondition assert isinstance(_snake_case , _snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase : List[str] = prime_factorization(_snake_case ) lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case ) elif numbera == 1 or numbera == 1: lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = max(_snake_case , _snake_case ) lowerCAmelCase : Dict = 0 lowerCAmelCase : int = 0 lowerCAmelCase : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case ) lowerCAmelCase : Any = prime_fac_a.count(_snake_case ) for _ in range(max(_snake_case , _snake_case ) ): ans *= n else: lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _snake_case ( _snake_case : Any ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_snake_case ): ans += 1 # precondition assert isinstance(_snake_case , _snake_case ) and is_prime( _snake_case ), "'ans' must been a prime number and from type int" return ans def _snake_case ( _snake_case : Any , _snake_case : Dict ): assert ( is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number lowerCAmelCase : str = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_snake_case ): number += 1 while number < p_number_a: ans.append(_snake_case ) number += 1 # fetch the next prime number. while not is_prime(_snake_case ): number += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and ans[0] != p_number_a and ans[len(_snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _snake_case ( _snake_case : List[Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase : Optional[Any] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_snake_case ) # precondition assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase : int = get_divisors(_snake_case ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (divisors[0] == 1) and (divisors[len(_snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _snake_case ( _snake_case : Optional[int] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase : Optional[Any] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase : Dict = 0 lowerCAmelCase : Dict = 1 lowerCAmelCase : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase : int = ans ans += fiba lowerCAmelCase : Optional[Any] = tmp return ans
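# Quick sanity checks mirroring two of the helpers above, with readable names
# substituted for the obfuscated ones (trial-division primality and the
# Goldbach pair search).
from math import sqrt

def is_prime(n):
    if n <= 1:
        return False
    return all(n % d for d in range(2, int(sqrt(n)) + 1))

def goldbach(n):
    # first pair of primes summing to an even n > 2
    for p in range(2, n):
        if is_prime(p) and is_prime(n - p):
            return p, n - p
    raise ValueError("no pair found")

assert is_prime(97) and not is_prime(1)
assert goldbach(28) == (5, 23)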
314
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. snake_case__ : List[str] = abspath(join(dirname(__file__), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _snake_case ( _snake_case : List[Any] ): config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def _snake_case ( _snake_case : Union[str, Any] ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def _snake_case ( _snake_case : Optional[int] ): from transformers.testing_utils import pytest_terminal_summary_main lowerCAmelCase : Optional[int] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case ) def _snake_case ( _snake_case : str , _snake_case : Any ): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowerCAmelCase : Tuple = 0 # Doctest custom flag to ignore output. snake_case__ : List[Any] = doctest.register_optionflag('''IGNORE_RESULT''') snake_case__ : Dict = doctest.OutputChecker class snake_case_( a__ ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Any = CustomOutputChecker snake_case__ : Any = HfDoctestModule snake_case__ : List[Any] = HfDocTestParser
314
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_( a__ ): __UpperCamelCase = '''vit_msn''' def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Tuple = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : Tuple = image_size lowerCAmelCase : List[str] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = qkv_bias
314
1
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent snake_case__ : int = {'''UserAgent''': UserAgent().random} def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : Optional[Any] = script.contents[0] lowerCAmelCase : List[str] = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class snake_case_: def __init__( self : Optional[int] , UpperCamelCase_ : int ): lowerCAmelCase : Tuple = F'''https://www.instagram.com/{username}/''' lowerCAmelCase : List[str] = self.get_json() def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[int] = requests.get(self.url , headers=UpperCamelCase_ ).text lowerCAmelCase : Optional[int] = BeautifulSoup(UpperCamelCase_ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : Dict ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase__ ( self : List[str] ): return self.user_data["username"] @property def lowerCamelCase__ ( self : List[Any] ): return self.user_data["full_name"] @property def lowerCamelCase__ ( self : str ): return self.user_data["biography"] @property def lowerCamelCase__ ( self : int ): return self.user_data["business_email"] @property def lowerCamelCase__ ( self : Optional[int] ): return self.user_data["external_url"] @property def lowerCamelCase__ ( self : Any ): return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase__ ( self : List[str] ): return self.user_data["edge_follow"]["count"] @property def lowerCamelCase__ ( self : str ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase__ ( self : int ): return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase__ ( self : List[str] ): return self.user_data["is_verified"] @property def lowerCamelCase__ ( self : Optional[Any] ): return self.user_data["is_private"] def _snake_case ( _snake_case : str = "github" ): import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions lowerCAmelCase : int = InstagramUser(_snake_case ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _snake_case ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : List[str] = InstagramUser('''github''') print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
314
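The crawler above needs live network access to exercise. The core extraction step can be sketched offline against a synthetic HTML string (the tag layout below is an assumption for illustration, not Instagram's real markup):

import json

from bs4 import BeautifulSoup

html = '<html><script>window._sharedData = {"config": 1, "user": "github"};</script></html>'
# Grab the <script> text and slice out the embedded JSON object literal.
script = BeautifulSoup(html, "html.parser").find("script")
data = script.contents[0]
info = json.loads(data[data.find('{"config"') : data.rfind("}") + 1])
assert info["user"] == "github"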
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case__ : Optional[Any] = logging.getLogger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case ) lowerCAmelCase : Optional[int] = { '''repo_id''': str(_snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(_snake_case , _snake_case , indent=4 ) def _snake_case ( _snake_case : Any ): if params.n_gpu <= 0: lowerCAmelCase : Dict = 0 lowerCAmelCase : Optional[int] = -1 lowerCAmelCase : Dict = True lowerCAmelCase : int = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] ) lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] ) lowerCAmelCase : int = int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node lowerCAmelCase : str = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = 1 lowerCAmelCase : Dict = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase : List[Any] = params.n_nodes > 1 # summary lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def _snake_case ( _snake_case : Optional[int] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
314
1
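The initializer above derives the multi-node topology from launcher-provided environment variables; the node and rank bookkeeping reduces to a few integer divisions. A self-contained sketch of that arithmetic (the world size and GPU count are made-up values):

# With n_gpu_per_node processes per machine, node id and local rank
# follow directly from the global rank.
world_size = 8
n_gpu_per_node = 4
n_nodes = world_size // n_gpu_per_node
for global_rank in range(world_size):
    node_id = global_rank // n_gpu_per_node
    local_rank = global_rank % n_gpu_per_node
    assert 0 <= local_rank < n_gpu_per_node
    print(f"rank {global_rank}: node {node_id}/{n_nodes}, local rank {local_rank}")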
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path snake_case__ : Union[str, Any] = '''src/transformers''' # Matches is_xxx_available() snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", snake_case__ : Union[str, Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], snake_case__ : Optional[Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: snake_case__ : Dict = re.compile(R'''^\s*try:''') # Catches a line with else: snake_case__ : int = re.compile(R'''^\s*else:''') def _snake_case ( _snake_case : Optional[Any] ): if _re_test_backend.search(_snake_case ) is None: return None lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : int = f.readlines() lowerCAmelCase : Tuple = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase : List[str] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0] lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase : int = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase : Optional[Any] = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase : Optional[Any] = lines[line_index] lowerCAmelCase : List[Any] = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase : List[str] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase : Any = lines[line_index] lowerCAmelCase : Tuple = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): def find_duplicates(_snake_case : Tuple ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase : Any = [] for key in import_dict_objects.keys(): lowerCAmelCase : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ): lowerCAmelCase : int = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' ) lowerCAmelCase : List[Any] = parse_init(_snake_case ) if objects is not None: lowerCAmelCase : Tuple = analyze_results(*_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) lowerCAmelCase : Any = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules snake_case__ : str = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase : Any = spec.loader.load_module() lowerCAmelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_snake_case ) > 0: lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
314
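The parser above feeds analyze_results, which flags objects that are duplicated or that appear on only one side of a delayed-import __init__. A toy sketch of that comparison (the object names are invented):

import collections


def find_duplicates(items):
    # Any key counted more than once is a duplicate registration.
    return [k for k, v in collections.Counter(items).items() if v > 1]


import_structure = {"none": ["FooModel", "FooModel", "FooConfig"]}
type_hints = {"none": ["FooConfig"]}
assert find_duplicates(import_structure["none"]) == ["FooModel"]
# Objects registered for import but missing from the TYPE_CHECKING branch:
assert set(import_structure["none"]) - set(type_hints["none"]) == {"FooModel"}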
"""simple docstring""" def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0''' raise ValueError(_snake_case ) else: lowerCAmelCase : str = sylvester(number - 1 ) lowerCAmelCase : Optional[Any] = num - 1 lowerCAmelCase : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
314
1
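The recursion above can be cross-checked against the closed-form recurrence for Sylvester's sequence, a(n + 1) = a(n)**2 - a(n) + 1 with a(1) = 2, which is exactly the lower * upper + 1 step:

def sylvester_iterative(n: int) -> int:
    # a(n + 1) = a(n)**2 - a(n) + 1, starting from a(1) = 2.
    num = 2
    for _ in range(n - 1):
        num = num * num - num + 1
    return num


assert [sylvester_iterative(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]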
"""simple docstring""" from ... import PretrainedConfig snake_case__ : List[str] = { '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''', } class snake_case_( a__ ): __UpperCamelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP __UpperCamelCase = '''nezha''' def __init__( self : List[Any] , UpperCamelCase_ : List[str]=2_1_1_2_8 , UpperCamelCase_ : List[Any]=7_6_8 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : Any=1_2 , UpperCamelCase_ : int=3_0_7_2 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=5_1_2 , UpperCamelCase_ : List[str]=6_4 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Optional[int]=1E-12 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Dict=True , **UpperCamelCase_ : int , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Dict = vocab_size lowerCAmelCase : List[str] = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : int = num_attention_heads lowerCAmelCase : Optional[Any] = hidden_act lowerCAmelCase : Optional[Any] = intermediate_size lowerCAmelCase : List[str] = hidden_dropout_prob lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase : Union[str, Any] = max_position_embeddings lowerCAmelCase : str = max_relative_position lowerCAmelCase : int = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Optional[int] = layer_norm_eps lowerCAmelCase : Optional[Any] = classifier_dropout lowerCAmelCase : List[str] = use_cache
314
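Nezha's distinguishing hyperparameter above is max_relative_position, which bounds the token-distance values used by its functional relative position encoding. A sketch of the clipping this implies (the table below only illustrates the clamping; the model's actual embedding lookup is not shown and is an assumption about usage):

# Pairwise distances j - i, clamped to [-max_rel, max_rel] before any lookup.
max_relative_position = 4
seq_len = 6
table = [
    [max(-max_relative_position, min(j - i, max_relative_position)) for j in range(seq_len)]
    for i in range(seq_len)
]
for row in table:
    print(row)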
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCAmelCase : Union[str, Any] = 6 lowerCAmelCase : Any = 128 lowerCAmelCase : List[Any] = (2, 2, 18, 2) lowerCAmelCase : Any = (4, 8, 16, 32) elif "large" in model_name: lowerCAmelCase : Tuple = 12 lowerCAmelCase : Dict = 192 lowerCAmelCase : List[str] = (2, 2, 18, 2) lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCAmelCase : Optional[int] = window_size lowerCAmelCase : Any = embed_dim lowerCAmelCase : Optional[Any] = depths lowerCAmelCase : int = num_heads return config def _snake_case ( _snake_case : Union[str, Any] ): if "encoder.mask_token" in name: lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCAmelCase : Tuple = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCAmelCase : str = '''layernorm.bias''' if "decoder" in name: pass else: lowerCAmelCase : Optional[Any] = '''swin.''' + name return name def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase : List[Any] = key.split('''.''' ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : Optional[Any] = int(key_split[4] ) lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase : Dict = val[:dim, :] lowerCAmelCase : Dict = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : str = val[ :dim ] lowerCAmelCase : List[str] = val[ dim : dim * 2 ] lowerCAmelCase : Optional[Any] = val[ -dim: ] else: lowerCAmelCase : str = val return orig_state_dict def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ): lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model'''] lowerCAmelCase : List[Any] = get_swin_config(_snake_case ) lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case ) model.eval() lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) lowerCAmelCase : str = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(f'''microsoft/{model_name}''' ) image_processor.push_to_hub(f'''microsoft/{model_name}''' ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Dict = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
314
1
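The conversion above splits each fused qkv projection into separate query/key/value matrices by slicing thirds along the first dimension. A self-contained sketch of that checkpoint surgery (the dimension is arbitrary):

import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)  # fused (q, k, v) projection
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
# Re-concatenating the slices must reproduce the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)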
"""simple docstring""" import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap snake_case__ : Tuple = '''Usage of script: script_name <size_of_canvas:int>''' snake_case__ : str = [0] * 100 + [1] * 10 random.shuffle(choice) def _snake_case ( _snake_case : int ): lowerCAmelCase : int = [[False for i in range(_snake_case )] for j in range(_snake_case )] return canvas def _snake_case ( _snake_case : list[list[bool]] ): for i, row in enumerate(_snake_case ): for j, _ in enumerate(_snake_case ): lowerCAmelCase : Tuple = bool(random.getrandbits(1 ) ) def _snake_case ( _snake_case : list[list[bool]] ): lowerCAmelCase : str = np.array(_snake_case ) lowerCAmelCase : Optional[Any] = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(_snake_case ): for c, pt in enumerate(_snake_case ): lowerCAmelCase : Optional[Any] = __judge_point( _snake_case , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) lowerCAmelCase : int = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. lowerCAmelCase : list[list[bool]] = current_canvas.tolist() return return_canvas def _snake_case ( _snake_case : bool , _snake_case : list[list[bool]] ): lowerCAmelCase : Any = 0 lowerCAmelCase : Union[str, Any] = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. lowerCAmelCase : Optional[Any] = pt if pt: if alive < 2: lowerCAmelCase : Optional[Any] = False elif alive == 2 or alive == 3: lowerCAmelCase : Tuple = True elif alive > 3: lowerCAmelCase : Any = False else: if alive == 3: lowerCAmelCase : Optional[int] = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) snake_case__ : Any = int(sys.argv[1]) # main working structure of this module. snake_case__ : Optional[Any] = create_canvas(canvas_size) seed(c) snake_case__ , snake_case__ : Tuple = plt.subplots() fig.show() snake_case__ : str = ListedColormap(['''w''', '''k''']) try: while True: snake_case__ : Optional[Any] = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
314
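The per-cell logic in __judge_point above is Conway's standard B3/S23 rule. A compact restatement that is convenient to unit-test in isolation:

def next_state(alive: bool, live_neighbours: int) -> bool:
    # Live cells survive with 2 or 3 live neighbours; dead cells are
    # born with exactly 3.
    if alive:
        return live_neighbours in (2, 3)
    return live_neighbours == 3


assert next_state(True, 1) is False   # underpopulation
assert next_state(True, 2) is True    # survival
assert next_state(True, 4) is False   # overpopulation
assert next_state(False, 3) is True   # reproduction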
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
1
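The two preprocessing helpers above establish a simple contract: images become NCHW float tensors scaled to [-1, 1] and masks become {0, 1} tensors. A sketch that checks the contract on random data (the shapes are arbitrary):

import numpy as np
import torch

image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
tensor = torch.from_numpy(image.astype(np.float32) / 255.0).permute(2, 0, 1)[None]
tensor = 2.0 * tensor - 1.0  # [0, 1] -> [-1, 1]
assert tensor.shape == (1, 3, 64, 64)
assert tensor.min() >= -1.0 and tensor.max() <= 1.0

mask = (np.random.rand(64, 64) >= 0.5).astype(np.float32)  # binarize at 0.5
mask_tensor = torch.from_numpy(mask)[None, None]
assert set(mask_tensor.unique().tolist()) <= {0.0, 1.0}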
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[int] = logging.get_logger(__name__) def _snake_case ( _snake_case : Dict , _snake_case : Union[str, Any]=False ): lowerCAmelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('''head''' ): lowerCAmelCase : Dict = '''segformer.encoder.''' + key if key.startswith('''backbone''' ): lowerCAmelCase : int = key.replace('''backbone''' , '''segformer.encoder''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase : Dict = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] lowerCAmelCase : Union[str, Any] = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(_snake_case )-1}''' ) if "norm" in key: lowerCAmelCase : str = key.replace('''norm''' , '''layer_norm''' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase : List[str] = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )] lowerCAmelCase : List[str] = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(_snake_case )-1}''' ) if "layer_norm1" in key: lowerCAmelCase : List[Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: lowerCAmelCase : Tuple = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase : Tuple = key[key.find('''block''' ) + len('''block''' )] lowerCAmelCase : Tuple = key.replace(f'''block{idx}''' , f'''block.{int(_snake_case )-1}''' ) if "attn.q" in key: lowerCAmelCase : Dict = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: lowerCAmelCase : Any = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: lowerCAmelCase : str = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: lowerCAmelCase : int = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: lowerCAmelCase : Union[str, Any] = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: lowerCAmelCase : Optional[Any] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: lowerCAmelCase : Union[str, Any] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) lowerCAmelCase : int = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase : Tuple = key[key.find('''linear_c''' ) + len('''linear_c''' )] lowerCAmelCase : Optional[int] = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(_snake_case )-1}''' ) if key.startswith('''head''' ): lowerCAmelCase : Dict = key.replace('''head''' , '''classifier''' ) lowerCAmelCase : str = value return new_state_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : List[str] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase : Tuple = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' 
) lowerCAmelCase : Tuple = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase : Dict = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase : List[str] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase : Dict = kv_bias[ config.hidden_sizes[i] : ] def _snake_case ( ): lowerCAmelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Tuple = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def _snake_case ( _snake_case : str , _snake_case : Tuple , _snake_case : Dict ): lowerCAmelCase : List[str] = SegformerConfig() lowerCAmelCase : Any = False # set attributes based on model_name lowerCAmelCase : Tuple = '''huggingface/label-files''' if "segformer" in model_name: lowerCAmelCase : List[Any] = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2] if "ade" in model_name: lowerCAmelCase : Any = 150 lowerCAmelCase : Tuple = '''ade20k-id2label.json''' lowerCAmelCase : Any = (1, 150, 128, 128) elif "city" in model_name: lowerCAmelCase : int = 19 lowerCAmelCase : Tuple = '''cityscapes-id2label.json''' lowerCAmelCase : Union[str, Any] = (1, 19, 128, 128) else: raise ValueError(f'''Model {model_name} not supported''' ) elif "mit" in model_name: lowerCAmelCase : str = True lowerCAmelCase : Optional[Any] = model_name[4:6] lowerCAmelCase : List[str] = 1000 lowerCAmelCase : Tuple = '''imagenet-1k-id2label.json''' lowerCAmelCase : Union[str, Any] = (1, 1000) else: raise ValueError(f'''Model {model_name} not supported''' ) # set config attributes lowerCAmelCase : Tuple = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} lowerCAmelCase : List[Any] = idalabel lowerCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCAmelCase : Union[str, Any] = [64, 128, 320, 512] lowerCAmelCase : Union[str, Any] = 256 elif size == "b2": lowerCAmelCase : int = [64, 128, 320, 512] lowerCAmelCase : Tuple = 768 lowerCAmelCase : List[str] = [3, 4, 6, 3] elif size == "b3": lowerCAmelCase : int = [64, 128, 320, 512] lowerCAmelCase : Tuple = 768 lowerCAmelCase : List[Any] = [3, 4, 18, 3] elif size == "b4": lowerCAmelCase : Tuple = [64, 128, 320, 512] lowerCAmelCase : Dict = 768 lowerCAmelCase : Tuple = [3, 8, 27, 3] elif size == "b5": lowerCAmelCase : Tuple = [64, 128, 320, 512] lowerCAmelCase : List[Any] = 768 lowerCAmelCase : List[Any] = [3, 6, 40, 3] else: raise ValueError(f'''Size {size} not supported''' ) # load image processor (only resize + normalize) lowerCAmelCase : Optional[int] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) # prepare image lowerCAmelCase : Union[str, Any] = prepare_img() lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict if encoder_only: lowerCAmelCase : Optional[int] = torch.load(_snake_case , map_location=torch.device('''cpu''' ) ) else: lowerCAmelCase : Union[str, Any] = torch.load(_snake_case , map_location=torch.device('''cpu''' ) )['''state_dict'''] # rename keys lowerCAmelCase : List[str] = rename_keys(_snake_case , 
encoder_only=_snake_case ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict if encoder_only: lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : str = SegformerForImageClassification(_snake_case ) else: lowerCAmelCase : Dict = SegformerForSemanticSegmentation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass lowerCAmelCase : List[Any] = model(_snake_case ) lowerCAmelCase : str = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCAmelCase : Optional[int] = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCAmelCase : List[str] = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCAmelCase : Tuple = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCAmelCase : str = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCAmelCase : Tuple = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCAmelCase : Union[str, Any] = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCAmelCase : Optional[Any] = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCAmelCase : List[str] = torch.tensor( [ 
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCAmelCase : Optional[Any] = torch.tensor( [ [ [-1.1372E01, -1.2787E01, -1.3477E01], [-1.2536E01, -1.4194E01, -1.4409E01], [-1.3217E01, -1.4888E01, -1.5327E01], ], [ [-1.4791E01, -1.7122E01, -1.8277E01], [-1.7163E01, -1.9192E01, -1.9533E01], [-1.7897E01, -1.9991E01, -2.0315E01], ], [ [7.6723E-01, 4.1921E-01, -7.7878E-02], [4.7772E-01, 9.5557E-03, -2.8082E-01], [3.6032E-01, -2.4826E-01, -5.1168E-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCAmelCase : Any = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCAmelCase : List[Any] = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCAmelCase : Dict = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCAmelCase : Tuple = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCAmelCase : Optional[int] = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCAmelCase : List[str] = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: lowerCAmelCase : str = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _snake_case , atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) model.save_pretrained(_snake_case ) 
image_processor.save_pretrained(_snake_case ) if __name__ == "__main__": snake_case__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) snake_case__ : Optional[int] = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
314
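Most of the conversion above is mechanical key renaming: substring swaps plus shifting 1-based block indices to 0-based. A regex-based sketch of the same idea (these two rules are a small subset, and the regex formulation is mine rather than the script's index-slicing approach):

import re


def rename_key(name: str) -> str:
    name = name.replace("attn.proj", "attention.output.dense")
    # patch_embed1 -> patch_embeddings.0, patch_embed2 -> patch_embeddings.1, ...
    return re.sub(r"patch_embed(\d)", lambda m: f"patch_embeddings.{int(m.group(1)) - 1}", name)


assert rename_key("patch_embed1.proj.weight") == "patch_embeddings.0.proj.weight"
assert rename_key("block1.attn.proj.bias") == "block1.attention.output.dense.bias"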
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are 
decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
314
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _snake_case ( _snake_case : str , _snake_case : float | Decimal , _snake_case : float = 10**-10 ): lowerCAmelCase : str = a while True: lowerCAmelCase : int = Decimal(_snake_case ) - ( Decimal(eval(_snake_case ) ) / Decimal(eval(str(diff(_snake_case ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(_snake_case ) ) < precision: # noqa: S307 return float(_snake_case ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""") # Find Square Root of 5 print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""") # Exponential Roots print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
314
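The eval-based version above is compact but executes arbitrary expression strings. The same iteration, x_{k+1} = x_k - f(x_k) / f'(x_k), can be written against plain callables with the derivative supplied explicitly; a sketch:

def newton(f, f_prime, x0: float, precision: float = 1e-10) -> float:
    # Iterate until |f(x)| falls below the requested precision.
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / f_prime(x)
    return x


root = newton(lambda x: x * x - 5 * x + 2, lambda x: 2 * x - 5, 0.4)
assert abs(root * root - 5 * root + 2) < 1e-9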
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
1
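The test above hinges on reproducible sampling: after set_seed, the shared noise and timestep batches must be bitwise identical across the DDPM and DDIM runs. A minimal sketch of that property:

import torch


def sample(seed: int) -> torch.Tensor:
    torch.manual_seed(seed)
    return torch.randn(4, 3, 32, 32)


# Same seed, same tensor; different seed, (almost surely) a different tensor.
assert torch.equal(sample(0), sample(0))
assert not torch.equal(sample(0), sample(1))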
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : Dict = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys snake_case__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
314
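_LazyModule defers the heavy imports above until a name is first accessed. The core mechanism is module-level __getattr__ (PEP 562); a self-contained sketch that lazily resolves names from the standard-library json module (the mapping is invented for illustration):

import importlib
import sys

_import_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    # Called only when `name` is not found in the module's namespace.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)


this_module = sys.modules[__name__]
print(this_module.dumps({"lazy": True}))  # json is imported only at this point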
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ): lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0] lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ ) lowerCAmelCase : Any = nsfw_detected.flatten() lowerCAmelCase : Dict = nsfw_detected > p_threshold lowerCAmelCase : int = nsfw_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ): if nsfw_detected_: lowerCAmelCase : List[Any] = np.zeros(images[idx].shape ) lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = watermark_detected.flatten() lowerCAmelCase : Optional[int] = watermark_detected > w_threshold lowerCAmelCase : Union[str, Any] = watermark_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(UpperCamelCase_ ): if watermark_detected_: lowerCAmelCase : List[str] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
314
1
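The safety-checker head above reduces, per detector, to: score each image, compare against a threshold, and replace flagged entries with black images. A self-contained NumPy sketch of that post-processing (shapes and the 0.5 default are illustrative):

import numpy as np

def blank_flagged_images(images, scores, threshold=0.5):
    # images: (batch, H, W, C) array; scores: (batch,) array of detector outputs.
    flags = scores > threshold
    images = images.copy()
    for idx, flagged in enumerate(flags):
        if flagged:
            images[idx] = np.zeros_like(images[idx])  # black image, as in the warning above
    return images, flags.tolist()

imgs = np.ones((2, 4, 4, 3))
edited, flags = blank_flagged_images(imgs, np.array([0.9, 0.1]))
assert flags == [True, False] and edited[0].sum() == 0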
"""simple docstring""" import argparse snake_case__ : Union[str, Any] = '''docs/source/_static/js/custom.js''' def _snake_case ( _snake_case : Dict ): with open(_snake_case , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : Optional[int] = f.readlines() lowerCAmelCase : str = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 lowerCAmelCase : List[Any] = f'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += f''' "v{version}": "v{version}",\n''' with open(_snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : List[Any] = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') snake_case__ : Dict = parser.parse_args() update_custom_js(args.version)
314
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
314
1
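Behind the fast tokenizer above, the special-token plumbing is simple: single sequences become [CLS] A [SEP] with segment ids 0, and pairs become [CLS] A [SEP] B [SEP] with the second segment marked 1. A dependency-free sketch (101/102 are the conventional BERT ids, assumed here purely for illustration):

CLS, SEP = 101, 102

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out

def create_token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)           # [CLS] A [SEP] -> segment 0
    if ids_b is None:
        return first
    return first + [1] * (len(ids_b) + 1)    # B [SEP] -> segment 1

assert build_inputs_with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]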
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class snake_case_( a__ ): __UpperCamelCase = '''encodec''' def __init__( self : Dict , UpperCamelCase_ : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_ : int=2_4_0_0_0 , UpperCamelCase_ : Optional[Any]=1 , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=1_2_8 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Optional[int]=[8, 5, 4, 2] , UpperCamelCase_ : Optional[Any]="weight_norm" , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]="reflect" , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=1.0 , UpperCamelCase_ : Any=1_0_2_4 , UpperCamelCase_ : int=None , UpperCamelCase_ : int=True , **UpperCamelCase_ : Dict , ): lowerCAmelCase : Optional[int] = target_bandwidths lowerCAmelCase : Tuple = sampling_rate lowerCAmelCase : Optional[Any] = audio_channels lowerCAmelCase : Dict = normalize lowerCAmelCase : int = chunk_length_s lowerCAmelCase : Union[str, Any] = overlap lowerCAmelCase : Optional[Any] = hidden_size lowerCAmelCase : Optional[Any] = num_filters lowerCAmelCase : Dict = num_residual_layers lowerCAmelCase : List[str] = upsampling_ratios lowerCAmelCase : List[Any] = norm_type lowerCAmelCase : Tuple = kernel_size lowerCAmelCase : str = last_kernel_size lowerCAmelCase : List[str] = residual_kernel_size lowerCAmelCase : int = dilation_growth_rate lowerCAmelCase : Tuple = use_causal_conv lowerCAmelCase : Optional[int] = pad_mode lowerCAmelCase : Any = compress lowerCAmelCase : Optional[int] = num_lstm_layers lowerCAmelCase : List[Any] = trim_right_ratio lowerCAmelCase : Dict = codebook_size lowerCAmelCase : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size lowerCAmelCase : Optional[Any] = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**UpperCamelCase_ ) @property def lowerCamelCase__ ( self : Tuple ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCamelCase__ ( self : Tuple ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def lowerCamelCase__ ( self : Optional[int] ): return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
314
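The config above derives its frame geometry from a handful of fields: the hop length is the product of the upsampling ratios, the frame rate follows from the sampling rate, and the quantizer count from the top target bandwidth. That arithmetic in isolation, using the 24 kHz defaults shown above:

import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = math.prod(upsampling_ratios)           # 320 samples advance per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
num_quantizers = int(1_000 * target_bandwidths[-1] // (frame_rate * 10))

assert (hop_length, frame_rate, num_quantizers) == (320, 75, 32)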
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
314
1
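The custom-timesteps tests above pin down two invariants: schedules must be strictly descending, and each step's predecessor is simply the next list entry, with -1 closing the chain. A standalone sketch of both:

def validate_and_pair(timesteps):
    # Reject non-descending schedules, as the failing test case expects.
    if any(timesteps[i] <= timesteps[i + 1] for i in range(len(timesteps) - 1)):
        raise ValueError("`custom_timesteps` must be in descending order.")
    # Pair each timestep with its "previous" step; -1 after the final step.
    return [(t, timesteps[i + 1] if i + 1 < len(timesteps) else -1)
            for i, t in enumerate(timesteps)]

assert validate_and_pair([100, 87, 50, 1, 0])[-1] == (0, -1)
try:
    validate_and_pair([100, 87, 50, 51, 0])
except ValueError:
    pass
else:
    raise AssertionError("ascending pair should have been rejected")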
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case__ : int = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. snake_case__ : Union[str, Any] = importlib.util.spec_from_file_location( '''transformers''', os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) snake_case__ : Optional[Any] = spec.loader.load_module() snake_case__ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` snake_case__ : int = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') snake_case__ : Union[str, Any] = { '''CLIPConfigMixin''', '''DecisionTransformerConfigMixin''', '''EncoderDecoderConfigMixin''', '''RagConfigMixin''', '''SpeechEncoderDecoderConfigMixin''', '''VisionEncoderDecoderConfigMixin''', '''VisionTextDualEncoderConfigMixin''', } def _snake_case ( ): lowerCAmelCase : str = [] for config_class in list(CONFIG_MAPPING.values() ): lowerCAmelCase : int = False # source code of `config_class` lowerCAmelCase : Dict = inspect.getsource(_snake_case ) lowerCAmelCase : Tuple = _re_checkpoint.findall(_snake_case ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` lowerCAmelCase, lowerCAmelCase : str = checkpoint # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase : Tuple = f'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: lowerCAmelCase : Tuple = True break lowerCAmelCase : Optional[Any] = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : Tuple = '''\n'''.join(sorted(_snake_case ) ) raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
314
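The consistency check above hinges on one regex: every docstring link of the form [name](https://huggingface.co/name) must round-trip from name to URL. The core of it, isolated, with a raw-string pattern (the non-raw pattern in the sample works but triggers invalid-escape warnings on recent Pythons):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

def mismatched_checkpoints(docstring):
    # Return (name, link) pairs whose link does not match the checkpoint name.
    return [(name, link)
            for name, link in _re_checkpoint.findall(docstring)
            if link != f"https://huggingface.co/{name}"]

doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
assert mismatched_checkpoints(doc) == []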
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
1
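Sanity check for the Project Euler solution above: the problem statement gives exactly four numbers below fifty expressible as a prime square plus prime cube plus prime fourth power (28, 33, 47, 49). An independent brute-force cross-check:

from itertools import product

small_primes = [2, 3, 5]
values = {a**2 + b**3 + c**4
          for a, b, c in product(small_primes, repeat=3)
          if a**2 + b**3 + c**4 < 50}
assert sorted(values) == [28, 33, 47, 49]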
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() lowerCAmelCase : int = BlipImageProcessor() lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) lowerCAmelCase : Optional[int] = BlipaProcessor(UpperCamelCase_ , UpperCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).tokenizer def lowerCamelCase__ ( self : Union[str, Any] , **UpperCamelCase_ : List[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor def lowerCamelCase__ ( self : Dict ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowerCAmelCase : int = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self : str ): lowerCAmelCase : Tuple = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase : str = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) lowerCAmelCase : List[str] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[Any] = self.get_image_processor() lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : int = self.prepare_image_inputs() lowerCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors='''np''' ) lowerCAmelCase : List[Any] = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.get_image_processor() lowerCAmelCase : Union[str, Any] = self.get_tokenizer() lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : List[Any] = '''lower newer''' lowerCAmelCase : Union[str, Any] = processor(text=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self : int ): 
lowerCAmelCase : Tuple = self.get_image_processor() lowerCAmelCase : Any = self.get_tokenizer() lowerCAmelCase : Optional[int] = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''lower newer''' lowerCAmelCase : List[str] = self.prepare_image_inputs() lowerCAmelCase : List[str] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = self.get_image_processor() lowerCAmelCase : Optional[Any] = self.get_tokenizer() lowerCAmelCase : Dict = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase : Optional[Any] = processor.batch_decode(UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[int] = self.get_image_processor() lowerCAmelCase : List[str] = self.get_tokenizer() lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : str = '''lower newer''' lowerCAmelCase : Any = self.prepare_image_inputs() lowerCAmelCase : Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
314
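The tests above exercise the processor contract: text is routed to the tokenizer, images to the image processor, a combined call merges both output dicts, and calling with neither input raises. A dependency-free sketch of that dispatch (the stub callables stand in for real components):

class MiniProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        out = {}
        if images is not None:
            out.update(self.image_processor(images))  # e.g. {'pixel_values': ...}
        if text is not None:
            out.update(self.tokenizer(text))          # e.g. {'input_ids': ..., 'attention_mask': ...}
        return out

proc = MiniProcessor(lambda t: {"input_ids": [1, 2], "attention_mask": [1, 1]},
                     lambda i: {"pixel_values": i})
assert set(proc(text="lower newer", images=[[0.0]])) == {"pixel_values", "input_ids", "attention_mask"}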
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor'''] snake_case__ : List[Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency snake_case__ : List[str] = { '''E''': 1_2.7_0, '''T''': 9.0_6, '''A''': 8.1_7, '''O''': 7.5_1, '''I''': 6.9_7, '''N''': 6.7_5, '''S''': 6.3_3, '''H''': 6.0_9, '''R''': 5.9_9, '''D''': 4.2_5, '''L''': 4.0_3, '''C''': 2.7_8, '''U''': 2.7_6, '''M''': 2.4_1, '''W''': 2.3_6, '''F''': 2.2_3, '''G''': 2.0_2, '''Y''': 1.9_7, '''P''': 1.9_3, '''B''': 1.2_9, '''V''': 0.9_8, '''K''': 0.7_7, '''J''': 0.1_5, '''X''': 0.1_5, '''Q''': 0.1_0, '''Z''': 0.0_7, } snake_case__ : List[str] = '''ETAOINSHRDLCUMWFGYPBVKJXQZ''' snake_case__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def _snake_case ( _snake_case : str ): lowerCAmelCase : Optional[Any] = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( _snake_case : tuple ): return x[0] def _snake_case ( _snake_case : str ): lowerCAmelCase : Optional[Any] = get_letter_count(_snake_case ) lowerCAmelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(_snake_case ) lowerCAmelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case ) lowerCAmelCase : Optional[Any] = ''''''.join(freq_to_letter[freq] ) lowerCAmelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=_snake_case , reverse=_snake_case ) lowerCAmelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(_snake_case ) def _snake_case ( _snake_case : str ): lowerCAmelCase : str = get_frequency_order(_snake_case ) lowerCAmelCase : Tuple = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
314
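The scorer above boils down to: rank the message's letters by frequency, then count how many of English's six most common letters land in the top six and six least common in the bottom six. A compact sketch (tie-breaking is simplified to English rank order, slightly different from the bucket sort in the sample):

from collections import Counter
import string

ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

def frequency_order(message):
    counts = Counter(c for c in message.upper() if c in string.ascii_uppercase)
    # Sort by descending count, breaking ties by English frequency rank.
    return "".join(sorted(string.ascii_uppercase,
                          key=lambda ch: (-counts[ch], ETAOIN.find(ch))))

def match_score(message):
    order = frequency_order(message)
    score = sum(ch in order[:6] for ch in ETAOIN[:6])
    score += sum(ch in order[-6:] for ch in ETAOIN[-6:])
    return score

assert 0 <= match_score("Hello world") <= 12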
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
314
1
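The augmentation above implements the classic rule: scale the short edge to the target size, then shrink again if the long edge would exceed the cap. The size arithmetic on its own, interpolation omitted:

def resized_hw(h, w, short_edge, max_size):
    # Scale so min(h, w) becomes short_edge, then cap max(h, w) at max_size.
    scale = short_edge / min(h, w)
    newh, neww = (short_edge, scale * w) if h < w else (scale * h, short_edge)
    if max(newh, neww) > max_size:
        cap = max_size / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

assert resized_hw(480, 640, 600, 1000) == (600, 800)
assert max(resized_hw(480, 1920, 600, 1000)) <= 1000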
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : List[str] ): lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase : Tuple = emb.weight.data return lin_layer def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ): lowerCAmelCase : Union[str, Any] = {} for old_key in state_dict.keys(): lowerCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCAmelCase : Tuple = state_dict[old_key] return new_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ): lowerCAmelCase : Optional[Any] = [] lowerCAmelCase : Tuple = 0 os.makedirs(_snake_case , exist_ok=_snake_case ) for expert in range(_snake_case ): lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(_snake_case ): lowerCAmelCase : List[str] = torch.load(_snake_case )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Any = os.path.join( _snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) torch.save(_snake_case , _snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_snake_case )[0]].dtype ) # Add the last block lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_snake_case ) == 1: lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) torch.save(_snake_case , _snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_snake_case , _snake_case ) # Otherwise, let's build the index lowerCAmelCase : Dict = {} for idx, shard in enumerate(_snake_case ): lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' ) lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) ) for key in shard: lowerCAmelCase : List[Any] = shard_file # Add the metadata lowerCAmelCase : Dict = {'''total_size''': total_size} lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n''' f.write(_snake_case ) return metadata, index if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ , snake_case__ : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) snake_case__ : str = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
314
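The conversion script's last step writes the standard sharded-checkpoint index: a JSON object with the total parameter byte count plus a parameter-name-to-shard-file map. The shape of that file in miniature (file names and sizes are made up for illustration):

import json

shards = {
    "pytorch_model-00001-of-00002.bin": ["encoder.weight", "encoder.bias"],
    "pytorch_model-00002-of-00002.bin": ["decoder.weight"],
}
param_bytes = {"encoder.weight": 4096, "encoder.bias": 64, "decoder.weight": 4096}

index = {
    "metadata": {"total_size": sum(param_bytes.values())},
    "weight_map": {param: shard for shard, params in shards.items() for param in params},
}
print(json.dumps(index, indent=2, sort_keys=True))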
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
314
1
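Two pieces of the script above stand alone: the CJK codepoint test and the rule that subword pieces inside a longer Chinese word get a ## prefix. A cleaned-up version of the codepoint test, covering a representative subset of the ranges listed in the sample:

def is_cjk_codepoint(cp):
    ranges = [
        (0x4E00, 0x9FFF),    # CJK Unified Ideographs
        (0x3400, 0x4DBF),    # Extension A
        (0x20000, 0x2A6DF),  # Extension B
        (0xF900, 0xFAFF),    # Compatibility Ideographs
    ]
    return any(lo <= cp <= hi for lo, hi in ranges)

assert is_cjk_codepoint(ord("中")) and not is_cjk_codepoint(ord("a"))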
"""simple docstring""" from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : Optional[int] = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” snake_case__ : Optional[int] = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py snake_case__ : int = 0 snake_case__ : str = 0xE_0_0_0 snake_case__ : Optional[int] = 0xE_0_0_1 snake_case__ : List[str] = 0xE_0_0_2 snake_case__ : Any = 0xE_0_0_3 snake_case__ : Dict = 0xE_0_0_4 # Maps special codepoints to human-readable names. snake_case__ : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. snake_case__ : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class snake_case_( a__ ): __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , UpperCamelCase_ : List[str]=chr(UpperCamelCase_ ) , UpperCamelCase_ : Tuple=chr(UpperCamelCase_ ) , UpperCamelCase_ : str=chr(UpperCamelCase_ ) , UpperCamelCase_ : Any=chr(UpperCamelCase_ ) , UpperCamelCase_ : Optional[Any]=chr(UpperCamelCase_ ) , UpperCamelCase_ : Dict=chr(UpperCamelCase_ ) , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Any=2_0_4_8 , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token lowerCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , model_max_length=UpperCamelCase_ , **UpperCamelCase_ , ) # Creates a mapping for looking up the IDs of special symbols. 
lowerCAmelCase : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase : Union[str, Any] = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. lowerCAmelCase : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase : Dict = UNICODE_VOCAB_SIZE lowerCAmelCase : Tuple = len(self._special_codepoints ) @property def lowerCamelCase__ ( self : Dict ): return self._unicode_vocab_size def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str ): return list(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str ): try: return ord(UpperCamelCase_ ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : int ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase_ ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str ): return "".join(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : List[str] = [self.sep_token_id] lowerCAmelCase : Dict = [self.cls_token_id] lowerCAmelCase : Union[str, Any] = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Dict = [1] + ([0] * len(UpperCamelCase_ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase_ )) + [1] return result def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : List[str] = [self.sep_token_id] lowerCAmelCase : Optional[int] = [self.cls_token_id] lowerCAmelCase : str = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): return ()
314
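The tokenizer above needs no vocabulary file: each character is its own token and its id is simply the Unicode codepoint, with a small block of private-use codepoints reserved for the special symbols. The core round-trip in a few lines:

def tokenize(text):
    return list(text)              # one token per character

def token_to_id(token):
    return ord(token)              # id == Unicode codepoint

def id_to_token(index):
    return chr(index)

ids = [token_to_id(t) for t in tokenize("héllo")]
assert "".join(id_to_token(i) for i in ids) == "héllo"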
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Dict = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : int = 0 lowerCAmelCase : Dict = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Union[str, Any] = 0 # compute the shape of the output matrix lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : int = 0 lowerCAmelCase : Tuple = 0 return updated_arr def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Union[str, Any] = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 # compute the shape of the output matrix lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : str = 0 lowerCAmelCase : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image snake_case__ : Optional[Any] = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
314
1
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _snake_case ( _snake_case : List[str] , _snake_case : int=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def _snake_case ( _snake_case : Dict , _snake_case : str=0 ): lowerCAmelCase : Dict = [] for old_item in old_list: lowerCAmelCase : Union[str, Any] = old_item.replace('''in_layers.0''' , '''norm1''' ) lowerCAmelCase : Union[str, Any] = new_item.replace('''in_layers.2''' , '''conv1''' ) lowerCAmelCase : Tuple = new_item.replace('''out_layers.0''' , '''norm2''' ) lowerCAmelCase : List[str] = new_item.replace('''out_layers.3''' , '''conv2''' ) lowerCAmelCase : Dict = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) lowerCAmelCase : Tuple = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) lowerCAmelCase : Dict = shave_segments(_snake_case , n_shave_prefix_segments=_snake_case ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any]=0 ): lowerCAmelCase : Optional[Any] = [] for old_item in old_list: lowerCAmelCase : Optional[int] = old_item lowerCAmelCase : Any = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) lowerCAmelCase : Optional[Any] = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) lowerCAmelCase : List[Any] = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) lowerCAmelCase : Tuple = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) lowerCAmelCase : str = shave_segments(_snake_case , n_shave_prefix_segments=_snake_case ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def _snake_case ( _snake_case : Tuple , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : List[Any]=None , _snake_case : List[Any]=None , _snake_case : Union[str, Any]=None ): assert isinstance(_snake_case , _snake_case ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): lowerCAmelCase : str = old_checkpoint[path] lowerCAmelCase : Dict = old_tensor.shape[0] // 3 lowerCAmelCase : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) lowerCAmelCase : Any = old_tensor.shape[0] // config['''num_head_channels'''] // 3 lowerCAmelCase : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = old_tensor.split(channels // num_heads , dim=1 ) lowerCAmelCase : Tuple = query.reshape(_snake_case ) lowerCAmelCase : Union[str, Any] = key.reshape(_snake_case ) lowerCAmelCase : Optional[int] = value.reshape(_snake_case ) for path in paths: lowerCAmelCase : Union[str, Any] = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here lowerCAmelCase : str = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) lowerCAmelCase : Optional[int] = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) lowerCAmelCase : int = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: lowerCAmelCase : Union[str, Any] = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: lowerCAmelCase : Dict = old_checkpoint[path['''old''']][:, :, 0] else: lowerCAmelCase : Union[str, Any] = old_checkpoint[path['''old''']] def _snake_case ( _snake_case : int , _snake_case : str ): lowerCAmelCase : Any = {} lowerCAmelCase : Dict = checkpoint['''time_embed.0.weight'''] lowerCAmelCase : Tuple = checkpoint['''time_embed.0.bias'''] lowerCAmelCase : Optional[int] = checkpoint['''time_embed.2.weight'''] lowerCAmelCase : List[Any] = checkpoint['''time_embed.2.bias'''] lowerCAmelCase : Union[str, Any] = checkpoint['''input_blocks.0.0.weight'''] lowerCAmelCase : Union[str, Any] = checkpoint['''input_blocks.0.0.bias'''] lowerCAmelCase : List[str] = checkpoint['''out.0.weight'''] lowerCAmelCase : Optional[Any] = checkpoint['''out.0.bias'''] lowerCAmelCase : Tuple = checkpoint['''out.2.weight'''] lowerCAmelCase : Union[str, Any] = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only lowerCAmelCase : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) lowerCAmelCase : str = { layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key] for layer_id in range(_snake_case ) } # Retrieves the keys for the middle blocks only lowerCAmelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) lowerCAmelCase : Dict = { layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key] for layer_id in range(_snake_case ) } # Retrieves the keys for the output blocks only lowerCAmelCase : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) lowerCAmelCase : List[Any] = { layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key] for layer_id in range(_snake_case ) } for i in range(1 , _snake_case ): lowerCAmelCase : Optional[int] = (i - 1) // (config['''num_res_blocks'''] + 1) lowerCAmelCase : List[str] = (i - 1) % 
(config['''num_res_blocks'''] + 1) lowerCAmelCase : List[Any] = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key] lowerCAmelCase : Dict = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key] if f'''input_blocks.{i}.0.op.weight''' in checkpoint: lowerCAmelCase : Union[str, Any] = checkpoint[ f'''input_blocks.{i}.0.op.weight''' ] lowerCAmelCase : Tuple = checkpoint[ f'''input_blocks.{i}.0.op.bias''' ] continue lowerCAmelCase : Tuple = renew_resnet_paths(_snake_case ) lowerCAmelCase : List[str] = {'''old''': f'''input_blocks.{i}.0''', '''new''': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} lowerCAmelCase : Optional[int] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( _snake_case , _snake_case , _snake_case , additional_replacements=[meta_path, resnet_op] , config=_snake_case ) if len(_snake_case ): lowerCAmelCase : Union[str, Any] = renew_attention_paths(_snake_case ) lowerCAmelCase : Optional[Any] = { '''old''': f'''input_blocks.{i}.1''', '''new''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } lowerCAmelCase : Tuple = { f'''input_blocks.{i}.1.qkv.bias''': { '''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', '''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', '''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''input_blocks.{i}.1.qkv.weight''': { '''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', '''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', '''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( _snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , attention_paths_to_split=_snake_case , config=_snake_case , ) lowerCAmelCase : List[str] = middle_blocks[0] lowerCAmelCase : Optional[int] = middle_blocks[1] lowerCAmelCase : int = middle_blocks[2] lowerCAmelCase : Union[str, Any] = renew_resnet_paths(_snake_case ) assign_to_checkpoint(_snake_case , _snake_case , _snake_case , config=_snake_case ) lowerCAmelCase : Dict = renew_resnet_paths(_snake_case ) assign_to_checkpoint(_snake_case , _snake_case , _snake_case , config=_snake_case ) lowerCAmelCase : Dict = renew_attention_paths(_snake_case ) lowerCAmelCase : Optional[int] = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( _snake_case , _snake_case , _snake_case , attention_paths_to_split=_snake_case , config=_snake_case ) for i in range(_snake_case ): lowerCAmelCase : int = i // (config['''num_res_blocks'''] + 1) lowerCAmelCase : Any = i % (config['''num_res_blocks'''] + 1) lowerCAmelCase : Optional[int] = [shave_segments(_snake_case , 2 ) for name in output_blocks[i]] lowerCAmelCase : Tuple = {} for layer in output_block_layers: lowerCAmelCase, lowerCAmelCase : int = layer.split('''.''' )[0], shave_segments(_snake_case , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(_snake_case ) else: lowerCAmelCase : Optional[Any] = [layer_name] if len(_snake_case ) > 1: lowerCAmelCase : 
List[str] = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key] lowerCAmelCase : Optional[int] = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key] lowerCAmelCase : str = renew_resnet_paths(_snake_case ) lowerCAmelCase : List[str] = renew_resnet_paths(_snake_case ) lowerCAmelCase : Optional[Any] = {'''old''': f'''output_blocks.{i}.0''', '''new''': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) if ["conv.weight", "conv.bias"] in output_block_list.values(): lowerCAmelCase : Any = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) lowerCAmelCase : List[Any] = checkpoint[ f'''output_blocks.{i}.{index}.conv.weight''' ] lowerCAmelCase : Union[str, Any] = checkpoint[ f'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(_snake_case ) == 2: lowerCAmelCase : Union[str, Any] = [] if len(_snake_case ): lowerCAmelCase : int = renew_attention_paths(_snake_case ) lowerCAmelCase : Optional[int] = { '''old''': f'''output_blocks.{i}.1''', '''new''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } lowerCAmelCase : int = { f'''output_blocks.{i}.1.qkv.bias''': { '''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', '''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', '''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''output_blocks.{i}.1.qkv.weight''': { '''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', '''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', '''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( _snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_snake_case , ) else: lowerCAmelCase : Tuple = renew_resnet_paths(_snake_case , n_shave_prefix_segments=1 ) for path in resnet_0_paths: lowerCAmelCase : Dict = '''.'''.join(['''output_blocks''', str(_snake_case ), path['''old''']] ) lowerCAmelCase : int = '''.'''.join(['''up_blocks''', str(_snake_case ), '''resnets''', str(_snake_case ), path['''new''']] ) lowerCAmelCase : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') snake_case__ : Any = parser.parse_args() snake_case__ : Union[str, Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: snake_case__ : Union[str, Any] = json.loads(f.read()) snake_case__ : Any = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] snake_case__ : List[str] = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: snake_case__ : Dict = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) snake_case__ : List[str] = 
VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) snake_case__ : Any = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
314
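The trickiest part of the converter is the index arithmetic that maps the original flat input_blocks.{i} numbering onto down_blocks.{block_id}.resnets.{layer_in_block_id}. A standalone sketch with a hypothetical num_res_blocks value makes the mapping concrete:

# Hypothetical config value; real checkpoints carry their own num_res_blocks.
num_res_blocks = 2
for i in range(1, 7):
    block_id = (i - 1) // (num_res_blocks + 1)
    layer_in_block_id = (i - 1) % (num_res_blocks + 1)
    print(f"input_blocks.{i} -> down_blocks.{block_id}, slot {layer_in_block_id}")
# input_blocks.1..3 land in down_blocks.0 and input_blocks.4..6 in down_blocks.1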
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
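The pipeline's last step maps the model output from its [-1, 1] range into displayable [0, 1] pixel values; the arithmetic is worth seeing in isolation:

import torch

x = torch.tensor([-1.0, -0.5, 0.0, 1.0])
print((x / 2 + 0.5).clamp(0, 1))  # tensor([0.0000, 0.2500, 0.5000, 1.0000])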
1
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs snake_case__ : Union[str, Any] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') snake_case__ : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) def _snake_case ( ): lowerCAmelCase : Optional[int] = cn.convert_to_negative(_snake_case ) # assert negative_img array for at least one True assert negative_img.any() def _snake_case ( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(_snake_case , 110 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def _snake_case ( ): lowerCAmelCase : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _snake_case ( ): lowerCAmelCase : Union[str, Any] = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCAmelCase : Dict = canny.canny(_snake_case ) # assert canny array for at least one True assert canny_array.any() def _snake_case ( ): assert gg.gaussian_filter(_snake_case , 5 , sigma=0.9 ).all() def _snake_case ( ): # laplace diagonals lowerCAmelCase : Tuple = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCAmelCase : int = conv.img_convolve(_snake_case , _snake_case ).astype(_snake_case ) assert res.any() def _snake_case ( ): assert med.median_filter(_snake_case , 3 ).any() def _snake_case ( ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = sob.sobel_filter(_snake_case ) assert grad.any() and theta.any() def _snake_case ( ): lowerCAmelCase : str = sp.make_sepia(_snake_case , 20 ) assert sepia.all() def _snake_case ( _snake_case : str = "digital_image_processing/image_data/lena_small.jpg" ): lowerCAmelCase : Tuple = bs.Burkes(imread(_snake_case , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def _snake_case ( _snake_case : str = "digital_image_processing/image_data/lena_small.jpg" , ): lowerCAmelCase : int = rs.NearestNeighbour(imread(_snake_case , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def _snake_case ( ): lowerCAmelCase : int = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
lowerCAmelCase : Dict = imread(_snake_case , 0 ) # Test for get_neighbors_pixel function() return not None lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : List[str] = image[x_coordinate][y_coordinate] lowerCAmelCase : List[Any] = lbp.get_neighbors_pixel( _snake_case , _snake_case , _snake_case , _snake_case ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCAmelCase : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCAmelCase : str = lbp.local_binary_value(_snake_case , _snake_case , _snake_case ) assert lbp_image.any()
314
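Most assertions in the test module above rely on numpy's truthiness reducers rather than exact pixel values; a two-line reminder of their semantics:

import numpy as np

assert np.array([0, 3]).any()       # passes: at least one nonzero element
assert not np.array([0, 3]).all()   # .all() requires every element to be nonzero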
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
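The _LazyModule registered at the bottom is what keeps importing the package cheap: heavy submodules are only imported when one of their attributes is first touched. A toy re-implementation of the idea (not the real transformers class) shows the mechanism:

import importlib
import types


class LazyModule(types.ModuleType):
    # Minimal sketch of the lazy-import pattern: attribute access triggers the
    # actual import of the submodule that owns the attribute.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(16.0))  # math is only imported here, on first use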
1
"""simple docstring""" def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(_snake_case ): if len(_snake_case ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_snake_case ) ) return data_lists def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ): lowerCAmelCase : list[list[float]] = [] for dlist, weight in zip(_snake_case , _snake_case ): lowerCAmelCase : Dict = min(_snake_case ) lowerCAmelCase : Tuple = max(_snake_case ) lowerCAmelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowerCAmelCase : List[Any] = f'''Invalid weight of {weight:f} provided''' raise ValueError(_snake_case ) score_lists.append(_snake_case ) return score_lists def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_snake_case ): lowerCAmelCase : Any = final_scores[j] + ele return final_scores def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ): lowerCAmelCase : List[Any] = get_data(_snake_case ) lowerCAmelCase : Any = calculate_each_score(_snake_case , _snake_case ) lowerCAmelCase : List[Any] = generate_final_scores(_snake_case ) # append scores to source data for i, ele in enumerate(_snake_case ): source_data[i].append(_snake_case ) return source_data
314
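A worked example for the pipeline above, computed by hand from the min-max normalization rules (weight 0 inverts the normalized score for "lower is better" columns, weight 1 keeps it for "higher is better"):

# source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
# weights     = [0, 0, 1]
#
# Per-column scores:
#   col 0 (min 20, max 23, weight 0): 1.0, 0.0, 1/3
#   col 1 (min 50, max 90, weight 0): 0.75, 0.0, 1.0
#   col 2 (min 2011, max 2015, weight 1): 0.25, 1.0, 0.0
# Appended totals: 2.0, 1.0, ~1.33 -- the first row wins despite its low year.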
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports snake_case__ : Optional[Any] = ''' import os ''' snake_case__ : Tuple = ''' def foo(): import os return False ''' snake_case__ : Any = ''' def foo(): def bar(): if True: import os return False return bar() ''' snake_case__ : Any = ''' import os try: import bar except ImportError: raise ValueError() ''' snake_case__ : int = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' snake_case__ : Any = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' snake_case__ : List[str] = ''' import os try: import bar except ImportError as e: raise ValueError() ''' snake_case__ : int = ''' import os try: import bar except: raise ValueError() ''' snake_case__ : List[Any] = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' snake_case__ : Optional[int] = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' snake_case__ : Any = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ): lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase : Tuple = get_imports(_snake_case ) assert parsed_imports == ["os"]
314
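In plain terms, the parametrized test asserts one invariant: get_imports() reports only unguarded top-level dependencies. The expected behavior, spelled out (the temp-file plumbing mirrors what the test body does):

# For every CASES snippet above:
#   write the snippet to <tmp>/test_file.py
#   get_imports('<tmp>/test_file.py') == ['os']
# `bar` and `baz` never show up because imports inside try/except blocks are
# treated as optional dependencies and skipped.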
1
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType snake_case__ : Optional[Any] = get_logger(__name__) def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[int]=0 ): os.makedirs(_snake_case , exist_ok=_snake_case ) with FSDP.state_dict_type( _snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCAmelCase : Optional[int] = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCAmelCase : Tuple = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' lowerCAmelCase : Optional[Any] = os.path.join(_snake_case , _snake_case ) if accelerator.process_index == 0: logger.info(f'''Saving model to {output_model_file}''' ) torch.save(_snake_case , _snake_case ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowerCAmelCase : Any = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) lowerCAmelCase : Union[str, Any] = os.path.join(_snake_case , _snake_case ) logger.info(f'''Saving model to {output_model_file}''' ) torch.save(_snake_case , _snake_case ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCAmelCase : Any = os.path.join(_snake_case , f'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_snake_case , exist_ok=_snake_case ) logger.info(f'''Saving model to {ckpt_dir}''' ) lowerCAmelCase : int = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=_snake_case , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , ) logger.info(f'''Model saved to {ckpt_dir}''' ) def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( _snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_snake_case ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return lowerCAmelCase : Tuple = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' lowerCAmelCase : Tuple = os.path.join(_snake_case , _snake_case ) logger.info(f'''Loading model from {input_model_file}''' ) lowerCAmelCase : int = torch.load(_snake_case ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == 
StateDictType.LOCAL_STATE_DICT: lowerCAmelCase : Tuple = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) lowerCAmelCase : Optional[Any] = os.path.join(_snake_case , _snake_case ) logger.info(f'''Loading model from {input_model_file}''' ) lowerCAmelCase : int = torch.load(_snake_case ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCAmelCase : List[str] = ( os.path.join(_snake_case , f'''{MODEL_NAME}_{model_index}''' ) if f'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading model from {ckpt_dir}''' ) lowerCAmelCase : Optional[Any] = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=_snake_case , storage_reader=dist_cp.FileSystemReader(_snake_case ) , planner=DefaultLoadPlanner() , ) lowerCAmelCase : Optional[int] = state_dict['''model'''] logger.info(f'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_snake_case ) def _snake_case ( _snake_case : int , _snake_case : Tuple , _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any]=0 ): os.makedirs(_snake_case , exist_ok=_snake_case ) with FSDP.state_dict_type( _snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCAmelCase : List[str] = FSDP.optim_state_dict(_snake_case , _snake_case ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: lowerCAmelCase : Dict = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) lowerCAmelCase : Union[str, Any] = os.path.join(_snake_case , _snake_case ) logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_snake_case , _snake_case ) logger.info(f'''Optimizer state saved in {output_optimizer_file}''' ) else: lowerCAmelCase : List[Any] = os.path.join(_snake_case , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_snake_case , exist_ok=_snake_case ) logger.info(f'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , ) logger.info(f'''Optimizer state saved in {ckpt_dir}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Tuple=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( _snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCAmelCase : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: lowerCAmelCase : int = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' ) lowerCAmelCase : int = torch.load(_snake_case ) logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' ) else: lowerCAmelCase : Union[str, Any] = ( 
os.path.join(_snake_case , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if f'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading Optimizer from {ckpt_dir}''' ) lowerCAmelCase : Optional[int] = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(_snake_case ) , ) lowerCAmelCase : Optional[int] = optim_state['''optimizer'''] logger.info(f'''Optimizer loaded from {ckpt_dir}''' ) lowerCAmelCase : int = FSDP.optim_state_dict_to_load(_snake_case , _snake_case , _snake_case ) optimizer.load_state_dict(_snake_case )
314
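A side-by-side look at the checkpoint filename schemes the branches above produce, with made-up values (the real MODEL_NAME constant comes from .constants):

MODEL_NAME, model_index, rank = "pytorch_model", 1, 3  # hypothetical values
print(f"{MODEL_NAME}_{model_index}.bin")             # FULL_STATE_DICT: rank 0 writes one file
print(f"{MODEL_NAME}_{model_index}_rank{rank}.bin")  # LOCAL_STATE_DICT: one file per rank
print(f"{MODEL_NAME}_{model_index}/")                # SHARDED_STATE_DICT: a directory of shards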
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ): super().__init__() lowerCAmelCase : Dict = initial_learning_rate lowerCAmelCase : List[str] = warmup_steps lowerCAmelCase : Union[str, Any] = power lowerCAmelCase : Dict = decay_schedule_fn lowerCAmelCase : str = name def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa ) lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa ) lowerCAmelCase : str = global_step_float / warmup_steps_float lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , ) def lowerCamelCase__ ( self : str ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ): lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , ) if num_warmup_steps: lowerCAmelCase : List[str] = WarmUp( initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , ) if weight_decay_rate > 0.0: lowerCAmelCase : Dict = AdamWeightDecay( learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , ) else: lowerCAmelCase : Any = tf.keras.optimizers.Adam( learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class snake_case_( a__ ): def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ): super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = weight_decay_rate lowerCAmelCase : List[str] = include_in_weight_decay lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay @classmethod def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp} return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) ) return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowerCAmelCase : Dict = apply_state or {} lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ): lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return 
super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return False return True class snake_case_( a__ ): def __init__( self : Any ): lowerCAmelCase : Any = [] lowerCAmelCase : List[str] = None @property def lowerCamelCase__ ( self : List[str] ): if self._accum_steps is None: lowerCAmelCase : Optional[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCamelCase__ ( self : Any ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ): if not self._gradients: lowerCAmelCase : Any = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(UpperCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(UpperCamelCase_ ) self._accum_steps.assign_add(1 ) def lowerCamelCase__ ( self : Union[str, Any] ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
314
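The warmup branch above, restated numerically: before warmup ends, the learning rate is initial_learning_rate * (step / warmup_steps) ** power, after which the decay schedule takes over. Plain-Python check with hypothetical values:

init_lr, warmup_steps, power = 1e-3, 100, 1.0
for step in (25, 50, 100):
    print(step, init_lr * min(step / warmup_steps, 1.0) ** power)
# 25 -> 0.00025, 50 -> 0.0005, 100 -> 0.001 (the decay schedule runs from here)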
1
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = 0 @slow def lowerCamelCase__ ( self : Tuple ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) # Check that tokenizer_type ≠ model_type lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , 
'''merges.txt''' ) ) lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) ) lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): with pytest.raises(UpperCamelCase_ ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowerCamelCase__ ( self : Any ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase : Tuple = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowerCamelCase__ ( self : int ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowerCamelCase__ ( self : Any ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase : Tuple = TOKENIZER_MAPPING.values() lowerCAmelCase : str = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : str ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = '''Hello, world. 
How are you?''' lowerCAmelCase : str = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): # Check we can load the tokenizer config of an online model. lowerCAmelCase : Optional[int] = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase : Optional[Any] = config.pop('''_commit_hash''' , UpperCamelCase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase : Tuple = get_tokenizer_config(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Any = get_tokenizer_config(UpperCamelCase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowerCamelCase__ ( self : int ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) lowerCAmelCase : Dict = CustomTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Optional[Any] = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Any = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowerCamelCase__ ( self : Optional[Any] ): class snake_case_( a__ ): __UpperCamelCase = False class snake_case_( a__ ): __UpperCamelCase = NewTokenizer __UpperCamelCase = False try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCamelCase__ ( self : Tuple ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : Any ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : int ): # Make sure we have cached the tokenizer. lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
314
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path snake_case__ : Union[str, Any] = '''src/transformers''' # Matches is_xxx_available() snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", snake_case__ : Union[str, Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], snake_case__ : Optional[Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: snake_case__ : Dict = re.compile(R'''^\s*try:''') # Catches a line with else: snake_case__ : int = re.compile(R'''^\s*else:''') def _snake_case ( _snake_case : Optional[Any] ): if _re_test_backend.search(_snake_case ) is None: return None lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : int = f.readlines() lowerCAmelCase : Tuple = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase : List[str] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0] lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase : int = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase : Optional[Any] = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase : Optional[Any] = lines[line_index] lowerCAmelCase : List[Any] = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase : List[str] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase : Any = lines[line_index] lowerCAmelCase : Tuple = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): def find_duplicates(_snake_case : Tuple ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase : Any = [] for key in import_dict_objects.keys(): lowerCAmelCase : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ): lowerCAmelCase : int = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' ) lowerCAmelCase : List[Any] = parse_init(_snake_case ) if objects is not None: lowerCAmelCase : Tuple = analyze_results(*_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) lowerCAmelCase : Any = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules snake_case__ : str = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase : Any = spec.loader.load_module() lowerCAmelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_snake_case ) > 0: lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
314
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu snake_case__ : Any = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase__ ( self : Tuple ): return 1_2 @property def lowerCamelCase__ ( self : str ): return 1_2 @property def lowerCamelCase__ ( self : Optional[Any] ): return 3_2 @property def lowerCamelCase__ ( self : Optional[int] ): torch.manual_seed(0 ) lowerCAmelCase : List[str] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowerCamelCase__ ( self : List[str] ): torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(UpperCamelCase_ ) @property def lowerCamelCase__ ( self : int ): torch.manual_seed(0 ) lowerCAmelCase : Dict = 1_2 lowerCAmelCase : Optional[Any] = 1_2 lowerCAmelCase : Optional[Any] = { '''attention_bias''': True, '''cross_attention_dim''': 3_2, '''attention_head_dim''': height * width, '''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 3_2, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } lowerCAmelCase : List[str] = TransformeraDModel(**UpperCamelCase_ ) return model def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = '''cpu''' lowerCAmelCase : List[str] = self.dummy_vqvae lowerCAmelCase : Any = self.dummy_text_encoder lowerCAmelCase : Tuple = self.dummy_tokenizer lowerCAmelCase : Optional[Any] = self.dummy_transformer lowerCAmelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = VQDiffusionPipeline( vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , ) lowerCAmelCase : int = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : List[Any] = '''teddy bear playing in the pool''' lowerCAmelCase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) lowerCAmelCase : List[str] = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase : int = output.images lowerCAmelCase : List[str] = 
torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) lowerCAmelCase : Union[str, Any] = pipe( [prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0] lowerCAmelCase : Dict = image[0, -3:, -3:, -1] lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase : Optional[int] = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[Any] = '''cpu''' lowerCAmelCase : Tuple = self.dummy_vqvae lowerCAmelCase : Any = self.dummy_text_encoder lowerCAmelCase : int = self.dummy_tokenizer lowerCAmelCase : Dict = self.dummy_transformer lowerCAmelCase : List[Any] = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCamelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) lowerCAmelCase : Tuple = VQDiffusionPipeline( vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , ) lowerCAmelCase : Any = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''teddy bear playing in the pool''' lowerCAmelCase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) lowerCAmelCase : Dict = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase : Tuple = output.images lowerCAmelCase : Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) lowerCAmelCase : List[Any] = pipe( [prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0] lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] lowerCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase : Any = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) lowerCAmelCase : Any = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) lowerCAmelCase : Optional[Any] = pipeline.to(UpperCamelCase_ ) pipeline.set_progress_bar_config(disable=UpperCamelCase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though lowerCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) lowerCAmelCase : Optional[int] = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=UpperCamelCase_ , output_type='''np''' , ) lowerCAmelCase : int = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - 
image ).max() < 2.0
314
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : List[str] ): lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase : Tuple = emb.weight.data return lin_layer def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ): lowerCAmelCase : Union[str, Any] = {} for old_key in state_dict.keys(): lowerCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCAmelCase : Tuple = state_dict[old_key] return new_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ): lowerCAmelCase : Optional[Any] = [] lowerCAmelCase : Tuple = 0 os.makedirs(_snake_case , exist_ok=_snake_case ) for expert in range(_snake_case ): lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(_snake_case ): lowerCAmelCase : List[str] = torch.load(_snake_case )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Any = os.path.join( _snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) torch.save(_snake_case , _snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_snake_case )[0]].dtype ) # Add the last block lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_snake_case ) == 1: lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) torch.save(_snake_case , _snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_snake_case , _snake_case ) # Otherwise, let's build the index lowerCAmelCase : Dict = {} for idx, shard in enumerate(_snake_case ): lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' ) lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) ) for key in shard: lowerCAmelCase : List[Any] = shard_file # Add the metadata lowerCAmelCase : Dict = {'''total_size''': total_size} lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n''' f.write(_snake_case ) return metadata, index if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ , snake_case__ : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) snake_case__ : str = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
314
1
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def _snake_case ( _snake_case : Callable[[int | float], int | float] , _snake_case : int | float , _snake_case : int | float , _snake_case : int = 100 , ): lowerCAmelCase : Optional[Any] = x_start lowerCAmelCase : str = fnc(_snake_case ) lowerCAmelCase : Optional[Any] = 0.0 for _ in range(_snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowerCAmelCase : Optional[int] = (x_end - x_start) / steps + xa lowerCAmelCase : Union[str, Any] = fnc(_snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowerCAmelCase : Union[str, Any] = xa lowerCAmelCase : List[Any] = fxa return length if __name__ == "__main__": def _snake_case ( _snake_case : Optional[int] ): return math.sin(10 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') snake_case__ : Optional[Any] = 10 while i <= 100_000: print(f"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
314
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase : int = False break # precondition assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool" return status def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase : Any = 0 # filters actual prime numbers. lowerCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase : Tuple = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_snake_case ): ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase : Dict = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_snake_case ): while quotient != 1: if is_prime(_snake_case ) and (quotient % factor == 0): ans.append(_snake_case ) quotient /= factor else: factor += 1 else: ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : Tuple ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : Optional[Any] = 0 # prime factorization of 'number' lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Any = max(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Dict ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : int = 0 # prime factorization of 'number' lowerCAmelCase : List[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Optional[int] = min(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0 , _snake_case ), "compare bust been from type bool" return number % 2 == 0 def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0 , _snake_case ), "compare bust been from type bool" return number % 2 != 0 def _snake_case ( _snake_case : Tuple ): assert ( isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case ) ), "'number' must been an int, even and > 2" lowerCAmelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case ) lowerCAmelCase : Optional[Any] = len(_snake_case ) # run variable for while-loops. lowerCAmelCase : List[str] = 0 lowerCAmelCase : Tuple = None # exit variable. for break up the loops lowerCAmelCase : str = True while i < len_pn and loop: lowerCAmelCase : str = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase : Dict = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and (len(_snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
lowerCAmelCase : Dict = 0 while numbera != 0: lowerCAmelCase : Union[str, Any] = numbera % numbera lowerCAmelCase : List[Any] = numbera lowerCAmelCase : List[Any] = rest # precondition assert isinstance(_snake_case , _snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase : List[str] = prime_factorization(_snake_case ) lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case ) elif numbera == 1 or numbera == 1: lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = max(_snake_case , _snake_case ) lowerCAmelCase : Dict = 0 lowerCAmelCase : int = 0 lowerCAmelCase : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case ) lowerCAmelCase : Any = prime_fac_a.count(_snake_case ) for _ in range(max(_snake_case , _snake_case ) ): ans *= n else: lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _snake_case ( _snake_case : Any ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_snake_case ): ans += 1 # precondition assert isinstance(_snake_case , _snake_case ) and is_prime( _snake_case ), "'ans' must been a prime number and from type int" return ans def _snake_case ( _snake_case : Any , _snake_case : Dict ): assert ( is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number lowerCAmelCase : str = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_snake_case ): number += 1 while number < p_number_a: ans.append(_snake_case ) number += 1 # fetch the next prime number. while not is_prime(_snake_case ): number += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and ans[0] != p_number_a and ans[len(_snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _snake_case ( _snake_case : List[Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase : Optional[Any] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_snake_case ) # precondition assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase : int = get_divisors(_snake_case ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (divisors[0] == 1) and (divisors[len(_snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _snake_case ( _snake_case : Optional[int] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase : Optional[Any] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase : Dict = 0 lowerCAmelCase : Dict = 1 lowerCAmelCase : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase : int = ans ans += fiba lowerCAmelCase : Optional[Any] = tmp return ans
314
1
"""simple docstring""" from math import factorial def _snake_case ( _snake_case : int , _snake_case : int ): # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(_snake_case ) // (factorial(_snake_case ) * factorial(n - k )) if __name__ == "__main__": print( '''The number of five-card hands possible from a standard''', f"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( '''If a class of 40 students must be arranged into groups of''', f"""4 for group projects, there are {combinations(40, 4)} ways""", '''to arrange them.\n''', ) print( '''If 10 teams are competing in a Formula One race, there''', f"""are {combinations(10, 3)} ways that first, second and""", '''third place can be awarded.''', )
314
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_( a__ ): __UpperCamelCase = '''vit_msn''' def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Tuple = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : Tuple = image_size lowerCAmelCase : List[str] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = qkv_bias
314
1
"""simple docstring""" def _snake_case ( _snake_case : list ): lowerCAmelCase : List[str] = len(_snake_case ) for i in range(1 , _snake_case ): lowerCAmelCase : Tuple = collection[i] lowerCAmelCase : Any = 0 lowerCAmelCase : Tuple = i - 1 while low <= high: lowerCAmelCase : Any = (low + high) // 2 if val < collection[mid]: lowerCAmelCase : Optional[int] = mid - 1 else: lowerCAmelCase : int = mid + 1 for j in range(_snake_case , _snake_case , -1 ): lowerCAmelCase : int = collection[j - 1] lowerCAmelCase : Any = val return collection if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by a comma:\n''').strip() snake_case__ : List[str] = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
314
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case__ : Optional[Any] = logging.getLogger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case ) lowerCAmelCase : Optional[int] = { '''repo_id''': str(_snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(_snake_case , _snake_case , indent=4 ) def _snake_case ( _snake_case : Any ): if params.n_gpu <= 0: lowerCAmelCase : Dict = 0 lowerCAmelCase : Optional[int] = -1 lowerCAmelCase : Dict = True lowerCAmelCase : int = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] ) lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] ) lowerCAmelCase : int = int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node lowerCAmelCase : str = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = 1 lowerCAmelCase : Dict = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase : List[Any] = params.n_nodes > 1 # summary lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def _snake_case ( _snake_case : Optional[int] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
314
1
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def _snake_case ( _snake_case : Dict ): # getting number of pixels in the image lowerCAmelCase, lowerCAmelCase : str = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_snake_case ): for j in range(_snake_case ): lowerCAmelCase : Any = [255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image snake_case__ : Any = imread('''image_data/lena.jpg''', 1) # convert to its negative snake_case__ : int = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
314
"""simple docstring""" def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0''' raise ValueError(_snake_case ) else: lowerCAmelCase : str = sylvester(number - 1 ) lowerCAmelCase : Optional[Any] = num - 1 lowerCAmelCase : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
314
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case_: def __init__( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]=1_3 , UpperCamelCase_ : int=3_0 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Union[str, Any]=3_7 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=2 , ): lowerCAmelCase : Any = parent lowerCAmelCase : Optional[int] = batch_size lowerCAmelCase : Any = image_size lowerCAmelCase : int = patch_size lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : Tuple = is_training lowerCAmelCase : Tuple = use_labels lowerCAmelCase : Optional[Any] = hidden_size lowerCAmelCase : Dict = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = intermediate_size lowerCAmelCase : int = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : int = type_sequence_label_size lowerCAmelCase : List[Any] = initializer_range lowerCAmelCase : List[str] = scope lowerCAmelCase : int = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2 lowerCAmelCase : Optional[Any] = num_patches + 2 def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : Any = None if self.use_labels: lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : Dict ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] 
, UpperCamelCase_ : List[str] ): lowerCAmelCase : List[str] = TFDeiTModel(config=UpperCamelCase_ ) lowerCAmelCase : str = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : int ): lowerCAmelCase : Tuple = TFDeiTForMaskedImageModeling(config=UpperCamelCase_ ) lowerCAmelCase : Any = model(UpperCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase : int = 1 lowerCAmelCase : Union[str, Any] = TFDeiTForMaskedImageModeling(UpperCamelCase_ ) lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Union[str, Any] = self.type_sequence_label_size lowerCAmelCase : int = TFDeiTForImageClassification(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase : Union[str, Any] = 1 lowerCAmelCase : Union[str, Any] = TFDeiTForImageClassification(UpperCamelCase_ ) lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = config_and_inputs lowerCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Union[str, Any] = TFDeiTModelTester(self ) lowerCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowerCamelCase__ ( self : List[str] ): pass def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or 
isinstance(UpperCamelCase_ , tf.keras.layers.Dense ) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()] lowerCAmelCase : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any=False ): lowerCAmelCase : Tuple = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCamelCase__ ( self : Dict ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : int = TFDeiTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def _snake_case ( ): lowerCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : str ): return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) lowerCAmelCase : str = self.default_image_processor lowerCAmelCase : str = prepare_img() lowerCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # forward pass lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ ) # verify the logits lowerCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
314
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCAmelCase : Union[str, Any] = 6 lowerCAmelCase : Any = 128 lowerCAmelCase : List[Any] = (2, 2, 18, 2) lowerCAmelCase : Any = (4, 8, 16, 32) elif "large" in model_name: lowerCAmelCase : Tuple = 12 lowerCAmelCase : Dict = 192 lowerCAmelCase : List[str] = (2, 2, 18, 2) lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCAmelCase : Optional[int] = window_size lowerCAmelCase : Any = embed_dim lowerCAmelCase : Optional[Any] = depths lowerCAmelCase : int = num_heads return config def _snake_case ( _snake_case : Union[str, Any] ): if "encoder.mask_token" in name: lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCAmelCase : Tuple = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCAmelCase : str = '''layernorm.bias''' if "decoder" in name: pass else: lowerCAmelCase : Optional[Any] = '''swin.''' + name return name def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase : List[Any] = key.split('''.''' ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : Optional[Any] = int(key_split[4] ) lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase : Dict = val[:dim, :] lowerCAmelCase : Dict = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : str = val[ :dim ] lowerCAmelCase : List[str] = val[ dim : dim * 2 ] lowerCAmelCase : Optional[Any] = val[ -dim: ] else: lowerCAmelCase : str = val return orig_state_dict def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ): lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model'''] lowerCAmelCase : List[Any] = get_swin_config(_snake_case ) lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case ) model.eval() lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) lowerCAmelCase : str = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(f'''microsoft/{model_name}''' ) image_processor.push_to_hub(f'''microsoft/{model_name}''' ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Dict = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
314
1
"""simple docstring""" def _snake_case ( _snake_case : int = 1000000 ): lowerCAmelCase : List[str] = set(range(3 , _snake_case , 2 ) ) primes.add(2 ) for p in range(3 , _snake_case , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _snake_case , _snake_case ) ) ) lowerCAmelCase : Dict = [float(_snake_case ) for n in range(limit + 1 )] for p in primes: for n in range(_snake_case , limit + 1 , _snake_case ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f"""{solution() = }""")
314
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
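A minimal usage sketch for this inpainting pipeline, assuming the public diffusers entry points (`RePaintPipeline`, `RePaintScheduler`); the CelebA-HQ checkpoint id and the image paths are illustrative placeholders.

import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

# Checkpoint id is illustrative; any DDPM checkpoint with a compatible UNet works.
scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

# Placeholder files: the image to inpaint and the mask selecting the region.
original = PIL.Image.open("original.png").convert("RGB").resize((256, 256))
mask = PIL.Image.open("mask.png").convert("RGB").resize((256, 256))

result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
)
result.images[0].save("inpainted.png")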
314
1
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin snake_case__ : Union[str, Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') snake_case__ : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') snake_case__ : List[Any] = '''pt''' if is_torch_available() else '''tf''' @require_sentencepiece @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = CamembertTokenizer __UpperCamelCase = CamembertTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def lowerCamelCase__ ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : Any = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = '''<pad>''' lowerCAmelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_4 ) def lowerCamelCase__ ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) lowerCAmelCase : Any = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowerCAmelCase : Any = '''I was born in 92000, and this is falsé.''' lowerCAmelCase : int = tokenizer.encode(UpperCamelCase_ ) lowerCAmelCase : Any = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : str = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) # <unk> tokens are not the same for `rust` than for `slow`. 
# Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) lowerCAmelCase : Tuple = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): if not self.test_rust_tokenizer: return lowerCAmelCase : Optional[int] = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = self.get_rust_tokenizer() lowerCAmelCase : int = '''I was born in 92000, and this is falsé.''' lowerCAmelCase : str = tokenizer.tokenize(UpperCamelCase_ ) lowerCAmelCase : int = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = self.get_rust_tokenizer() lowerCAmelCase : str = tokenizer.encode(UpperCamelCase_ ) lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : int ): # fmt: off lowerCAmelCase : Tuple = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowerCAmelCase : List[Any] = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=UpperCamelCase_ , )
314
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are 
decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
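Outside of tests, the iterator streamer is typically consumed from a background generation thread; a minimal sketch, assuming a real checkpoint such as "gpt2" is available locally:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# "gpt2" is an assumption here; any causal LM checkpoint works.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20, do_sample=False)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

# Tokens arrive incrementally while generation runs in the background thread.
for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()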
314
1
"""simple docstring""" import functools from typing import Any def _snake_case ( _snake_case : str , _snake_case : list[str] ): # Validation if not isinstance(_snake_case , _snake_case ) or len(_snake_case ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(_snake_case , _snake_case ) or not all( isinstance(_snake_case , _snake_case ) and len(_snake_case ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie lowerCAmelCase : dict[str, Any] = {} lowerCAmelCase : Optional[Any] = '''WORD_KEEPER''' for word in words: lowerCAmelCase : Tuple = trie for c in word: if c not in trie_node: lowerCAmelCase : List[str] = {} lowerCAmelCase : Union[str, Any] = trie_node[c] lowerCAmelCase : Any = True lowerCAmelCase : Union[str, Any] = len(_snake_case ) # Dynamic programming method @functools.cache def is_breakable(_snake_case : int ) -> bool: if index == len_string: return True lowerCAmelCase : Optional[Any] = trie for i in range(_snake_case , _snake_case ): lowerCAmelCase : Any = trie_node.get(string[i] , _snake_case ) if trie_node is None: return False if trie_node.get(_snake_case , _snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
314
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
1
"""simple docstring""" import torch from diffusers import DiffusionPipeline class snake_case_( a__ ): def __init__( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) def __call__( self : List[Any] ): lowerCAmelCase : Union[str, Any] = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) lowerCAmelCase : List[Any] = 1 lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample lowerCAmelCase : Optional[int] = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample lowerCAmelCase : List[str] = scheduler_output - scheduler_output + torch.ones_like(UpperCamelCase_ ) return result
314
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ): lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0] lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ ) lowerCAmelCase : Any = nsfw_detected.flatten() lowerCAmelCase : Dict = nsfw_detected > p_threshold lowerCAmelCase : int = nsfw_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ): if nsfw_detected_: lowerCAmelCase : List[Any] = np.zeros(images[idx].shape ) lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = watermark_detected.flatten() lowerCAmelCase : Optional[int] = watermark_detected > w_threshold lowerCAmelCase : Union[str, Any] = watermark_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(UpperCamelCase_ ): if watermark_detected_: lowerCAmelCase : List[str] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
314
1
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position snake_case__ : List[Any] = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip snake_case__ : Dict = concatenate_datasets snake_case__ : List[Any] = DownloadConfig snake_case__ : Tuple = DownloadManager snake_case__ : Dict = DownloadMode snake_case__ : Tuple = DownloadConfig snake_case__ : Optional[Any] = DownloadMode snake_case__ : Optional[Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
314
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
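The pair-encoding behaviour implemented by the two helper methods above can be seen through the public API (assuming the "bert-base-uncased" files are available):

from transformers import BertTokenizerFast

# Checkpoint name is the stock BERT base model; any BERT vocab works.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoded = tokenizer("How are you?", "I am fine.")

# [CLS] sentence A [SEP] -> token_type_id 0; sentence B [SEP] -> token_type_id 1.
print(encoded["input_ids"])
print(encoded["token_type_ids"])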
314
1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
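The inference path these tests exercise, reduced to its essentials: start from Gaussian noise and repeatedly apply the scheduler's `step`.

import torch
from diffusers import DDPMScheduler, UNet2DModel

scheduler = DDPMScheduler(num_train_timesteps=1000)
unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)

scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    for t in scheduler.timesteps:
        # Predict the noise residual, then step backwards from x_t to x_t-1.
        noise_pred = unet(sample, t).sample
        sample = scheduler.step(noise_pred, t, sample).prev_sample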
314
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Tuple ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): lowerCAmelCase : List[Any] = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Tuple = '''sshleifer/tiny-gpt2''' lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = '''sgugger/tiny-distilbert-classification''' lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , only_pretrain_model=UpperCamelCase_ , ) lowerCAmelCase : Dict = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Any = '''sshleifer/tiny-gpt2''' lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = '''sshleifer/tiny-gpt2''' lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) lowerCAmelCase : List[str] = TensorFlowBenchmark(UpperCamelCase_ , [config] ) lowerCAmelCase : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Dict = '''sshleifer/tiny-gpt2''' lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) lowerCAmelCase : List[Any] = TensorFlowBenchmark(UpperCamelCase_ , 
[config] ) lowerCAmelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : int = '''sshleifer/tiny-gpt2''' lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = '''sshleifer/tiny-gpt2''' lowerCAmelCase : List[str] = AutoConfig.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Any = TensorFlowBenchmark(UpperCamelCase_ , [config] ) lowerCAmelCase : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Any = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) lowerCAmelCase : str = TensorFlowBenchmark(UpperCamelCase_ , configs=[config] ) lowerCAmelCase : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = '''sshleifer/tiny-gpt2''' lowerCAmelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Dict = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=UpperCamelCase_ , save_to_csv=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(UpperCamelCase_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(UpperCamelCase_ , '''env.csv''' ) , multi_process=UpperCamelCase_ , ) lowerCAmelCase : str = TensorFlowBenchmark(UpperCamelCase_ ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCamelCase_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase_ 
, '''env.csv''' ) ).exists() ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[int] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(UpperCamelCase_ : Tuple ): self.assertTrue(hasattr(UpperCamelCase_ , '''sequential''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''cumulative''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''current''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase_ , '''log.txt''' ) , log_print=UpperCamelCase_ , trace_memory_line_by_line=UpperCamelCase_ , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) lowerCAmelCase : Tuple = TensorFlowBenchmark(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(UpperCamelCase_ , '''log.txt''' ) ).exists() )
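The same benchmark utility can be run standalone; this mirrors the arguments used in the tests above.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
# Timing and memory results are keyed by model, batch size and sequence length.
print(results.time_inference_result)
print(results.memory_inference_result)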
314
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
1
"""simple docstring""" snake_case__ : Dict = '''Input must be a string of 8 numbers plus letter''' snake_case__ : Union[str, Any] = '''TRWAGMYFPDXBNJZSQVHLCKE''' def _snake_case ( _snake_case : str ): if not isinstance(_snake_case , _snake_case ): lowerCAmelCase : str = f'''Expected string as input, found {type(_snake_case ).__name__}''' raise TypeError(_snake_case ) lowerCAmelCase : List[str] = spanish_id.replace('''-''' , '''''' ).upper() if len(_snake_case ) != 9: raise ValueError(_snake_case ) try: lowerCAmelCase : Tuple = int(spanish_id_clean[0:8] ) lowerCAmelCase : List[Any] = spanish_id_clean[8] except ValueError as ex: raise ValueError(_snake_case ) from ex if letter.isdigit(): raise ValueError(_snake_case ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor'''] snake_case__ : List[Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[int] = tempfile.mkdtemp() # fmt: off lowerCAmelCase : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCAmelCase : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : int , **UpperCamelCase_ : Dict ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : int ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowerCAmelCase : List[str] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Union[str, Any] = self.get_tokenizer() lowerCAmelCase : List[str] = self.get_image_processor() lowerCAmelCase : str = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) lowerCAmelCase : int = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : str = self.get_image_processor() lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : List[str] = self.prepare_image_inputs() lowerCAmelCase : Dict = image_processor(UpperCamelCase_ , return_tensors='''np''' ) lowerCAmelCase : Tuple = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Union[str, Any] = self.get_image_processor() lowerCAmelCase : List[str] = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = '''lower newer''' lowerCAmelCase : List[Any] = processor(text=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer(UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Dict = self.get_image_processor() lowerCAmelCase : List[str] = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : List[Any] = '''lower newer''' lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs() lowerCAmelCase : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(UpperCamelCase_ ): processor() def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[str] = self.get_image_processor() lowerCAmelCase : List[Any] = self.get_tokenizer() lowerCAmelCase : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase : str = processor.batch_decode(UpperCamelCase_ ) lowerCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : int = self.get_image_processor() lowerCAmelCase : List[Any] = self.get_tokenizer() lowerCAmelCase : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = '''lower newer''' lowerCAmelCase : Optional[Any] = self.prepare_image_inputs() lowerCAmelCase : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
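A minimal sketch of the processor API these tests exercise; `saved_dir` is a placeholder for a directory produced by processor.save_pretrained(), like the fixture's tmpdirname.

import numpy as np
from PIL import Image
from transformers import VisionTextDualEncoderProcessor

processor = VisionTextDualEncoderProcessor.from_pretrained(saved_dir)  # hypothetical path
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']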
314
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
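A descriptive-name restatement of the box-scaling helper at the end of the file above (both module-level helpers were reduced to _snake_case in this dump). The name scale_boxes is mine; the slicing logic is the file's, with a toy check.

import torch


def scale_boxes(boxes: torch.Tensor, scale_yx: torch.Tensor) -> torch.Tensor:
    boxes = boxes.clone()
    boxes[:, 0::2] *= scale_yx[:, 1]  # x0, x1 scale with the width factor
    boxes[:, 1::2] *= scale_yx[:, 0]  # y0, y1 scale with the height factor
    return boxes


print(scale_boxes(torch.tensor([[10.0, 20.0, 110.0, 220.0]]),
                  torch.tensor([[0.5, 2.0]])))
# tensor([[ 20.,  10., 220., 110.]])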
314
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str=7 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[int]=1_8 , UpperCamelCase_ : List[str]=3_0 , UpperCamelCase_ : Dict=4_0_0 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=False , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[str]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , ): lowerCAmelCase : List[str] = parent lowerCAmelCase : Optional[Any] = batch_size lowerCAmelCase : Union[str, Any] = num_channels lowerCAmelCase : List[str] = image_size lowerCAmelCase : Optional[int] = min_resolution lowerCAmelCase : Optional[int] = max_resolution lowerCAmelCase : List[str] = do_resize lowerCAmelCase : List[Any] = size if size is not None else {'''height''': 1_8, '''width''': 2_0} lowerCAmelCase : Tuple = do_thumbnail lowerCAmelCase : Dict = do_align_axis lowerCAmelCase : List[str] = do_pad lowerCAmelCase : Dict = do_normalize lowerCAmelCase : Tuple = image_mean lowerCAmelCase : Any = image_std def lowerCamelCase__ ( self : int ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = DonutImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = DonutImageProcessingTester(self ) @property def lowerCamelCase__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_thumbnail''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_pad''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 2_0} ) lowerCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) # Previous config had dimensions in (width, height) order lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) ) self.assertEqual(image_processor.size , {'''height''': 8_4, '''width''': 4_2} ) def 
lowerCamelCase__ ( self : Any ): pass @is_flaky() def lowerCamelCase__ ( self : str ): # Initialize image_processing lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched lowerCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def lowerCamelCase__ ( self : Any ): # Initialize image_processing lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched lowerCAmelCase : List[Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def lowerCamelCase__ ( self : Optional[Any] ): # Initialize image_processing lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched lowerCAmelCase : Optional[int] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
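A sketch of calling DonutImageProcessor directly with the fixture defaults above; the expected output shape follows the test assertions, assuming do_pad brings every image up to the configured size.

import numpy as np
from PIL import Image
from transformers import DonutImageProcessor

image_processor = DonutImageProcessor(
    do_resize=True,
    size={"height": 18, "width": 20},
    do_thumbnail=True,
    do_align_long_axis=False,
    do_pad=True,
)
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])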
314
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
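A toy illustration of the reference ids this script writes out, mirroring the final filter above: positions of single-character Chinese sub-tokens marked with "##" inside a whole word.

input_tokens = ["[CLS]", "天", "##气", "好", "[SEP]"]
ref_id = [
    i
    for i, token in enumerate(input_tokens)
    if token.startswith("##") and len(token[2:]) == 1
]
print(ref_id)  # [2] -> "##气" is the non-initial piece of the word 天气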
314
1
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip snake_case__ : Tuple = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def _snake_case ( _snake_case : Tuple ): if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def _snake_case ( _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Tuple ): return max(metric_fn(_snake_case , _snake_case ) for gt in ground_truths ) def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ): lowerCAmelCase : str = [line.strip() for line in open(_snake_case , '''r''' ).readlines()] lowerCAmelCase : str = [] if args.gold_data_mode == "qa": lowerCAmelCase : List[Any] = pd.read_csv(_snake_case , sep='''\t''' , header=_snake_case ) for answer_list in data[1]: lowerCAmelCase : List[Any] = ast.literal_eval(_snake_case ) answers.append(_snake_case ) else: lowerCAmelCase : List[Any] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()] lowerCAmelCase : Tuple = [[reference] for reference in references] lowerCAmelCase : List[Any] = 0 for prediction, ground_truths in zip(_snake_case , _snake_case ): total += 1 em += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case ) fa += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case ) lowerCAmelCase : List[str] = 100.0 * em / total lowerCAmelCase : Dict = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def _snake_case ( _snake_case : Any , _snake_case : str , _snake_case : List[str] ): lowerCAmelCase : Tuple = args.k lowerCAmelCase : List[str] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()] lowerCAmelCase : Optional[Any] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()] lowerCAmelCase : Optional[Any] = 0 for hypo, reference in zip(_snake_case , _snake_case ): lowerCAmelCase : List[Any] = set(hypo.split('''\t''' )[:k] ) lowerCAmelCase : int = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k lowerCAmelCase : List[Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any] ): def strip_title(_snake_case : Tuple ): if title.startswith('''"''' ): lowerCAmelCase : Any = title[1:] if title.endswith('''"''' ): lowerCAmelCase : Tuple = title[:-1] return title lowerCAmelCase : Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case , )['''input_ids'''].to(args.device ) lowerCAmelCase : List[str] = rag_model.rag.question_encoder(_snake_case ) lowerCAmelCase : Dict = question_enc_outputs[0] lowerCAmelCase : Dict = rag_model.retriever( _snake_case , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , 
return_tensors='''pt''' , ) lowerCAmelCase : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) lowerCAmelCase : Any = [] for docs in all_docs: lowerCAmelCase : Optional[int] = [strip_title(_snake_case ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(_snake_case ) ) return provenance_strings def _snake_case ( _snake_case : int , _snake_case : Any , _snake_case : List[str] ): with torch.no_grad(): lowerCAmelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case ) lowerCAmelCase : Union[str, Any] = inputs_dict.input_ids.to(args.device ) lowerCAmelCase : Dict = inputs_dict.attention_mask.to(args.device ) lowerCAmelCase : Any = rag_model.generate( # rag_model overwrites generate _snake_case , attention_mask=_snake_case , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_snake_case , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) lowerCAmelCase : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) if args.print_predictions: for q, a in zip(_snake_case , _snake_case ): logger.info('''Q: {} - A: {}'''.format(_snake_case , _snake_case ) ) return answers def _snake_case ( ): lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_snake_case , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=_snake_case , choices=['''exact''', '''compressed''', '''legacy'''] , type=_snake_case , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=_snake_case , type=_snake_case , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=_snake_case , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_snake_case , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=_snake_case , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=_snake_case , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=_snake_case , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , 
action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=_snake_case , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=_snake_case , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=_snake_case , help='''Min length of the generated answers''' ) parser.add_argument('''--max_length''' , default=50 , type=_snake_case , help='''Max length of the generated answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) lowerCAmelCase : int = parser.parse_args() lowerCAmelCase : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def _snake_case ( _snake_case : int ): lowerCAmelCase : Optional[int] = {} if args.model_type is None: lowerCAmelCase : List[Any] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): lowerCAmelCase : Any = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration lowerCAmelCase : Optional[int] = args.n_docs if args.index_name is not None: lowerCAmelCase : Optional[Any] = args.index_name if args.index_path is not None: lowerCAmelCase : Any = args.index_path else: lowerCAmelCase : str = BartForConditionalGeneration lowerCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , _snake_case ) lowerCAmelCase : str = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k lowerCAmelCase : Tuple = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(_snake_case , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(_snake_case ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): lowerCAmelCase : List[str] = RagRetriever.from_pretrained(_snake_case , **_snake_case ) lowerCAmelCase : Optional[int] = model_class.from_pretrained(_snake_case , retriever=_snake_case , **_snake_case ) model.retriever.init_retrieval() else: lowerCAmelCase : Optional[int] = model_class.from_pretrained(_snake_case , **_snake_case ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: lowerCAmelCase : Optional[int] = [] for line in tqdm(_snake_case ): questions.append(line.strip() ) if len(_snake_case ) == args.eval_batch_size: lowerCAmelCase : List[Any] = evaluate_batch_fn(_snake_case , _snake_case , _snake_case ) 
preds_file.write('''\n'''.join(_snake_case ) + '''\n''' ) preds_file.flush() lowerCAmelCase : List[str] = [] if len(_snake_case ) > 0: lowerCAmelCase : str = evaluate_batch_fn(_snake_case , _snake_case , _snake_case ) preds_file.write('''\n'''.join(_snake_case ) ) preds_file.flush() score_fn(_snake_case , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": snake_case__ : Optional[Any] = get_args() main(args)
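A hypothetical invocation of the evaluation script above (the script file name and data paths are placeholders; the flags are the ones its argparse defines, and facebook/rag-sequence-nq is a real RAG checkpoint):

# python eval_rag.py \
#   --model_name_or_path facebook/rag-sequence-nq \
#   --model_type rag_sequence \
#   --evaluation_set data/test.source \
#   --gold_data_path data/gold_data \
#   --gold_data_mode qa \
#   --predictions_path out/preds.txt \
#   --eval_mode e2e \
#   --n_docs 5 \
#   --eval_batch_size 8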
314
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Dict = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : int = 0 lowerCAmelCase : Dict = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Union[str, Any] = 0 # compute the shape of the output matrix lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : int = 0 lowerCAmelCase : Tuple = 0 return updated_arr def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Union[str, Any] = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 # compute the shape of the output matrix lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : str = 0 lowerCAmelCase : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image snake_case__ : Optional[Any] = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
314
1
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case_: def __init__( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=False , UpperCamelCase_ : Dict=1_0 , UpperCamelCase_ : str=3 , UpperCamelCase_ : str=3_2 * 4 , UpperCamelCase_ : Dict=3_2 * 6 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : str=3_2 , ): lowerCAmelCase : Union[str, Any] = parent lowerCAmelCase : Dict = batch_size lowerCAmelCase : str = is_training lowerCAmelCase : Optional[int] = use_auxiliary_loss lowerCAmelCase : str = num_queries lowerCAmelCase : Union[str, Any] = num_channels lowerCAmelCase : Dict = min_size lowerCAmelCase : List[str] = max_size lowerCAmelCase : List[str] = num_labels lowerCAmelCase : Tuple = mask_feature_size def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCamelCase_ ) lowerCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5 ).float() lowerCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long() lowerCAmelCase : Union[str, Any] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCamelCase__ ( self : Tuple ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowerCamelCase__ ( self : int ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self.prepare_config_and_inputs() lowerCAmelCase : Optional[int] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ): lowerCAmelCase : List[str] = output.encoder_hidden_states lowerCAmelCase : Union[str, Any] = output.pixel_decoder_hidden_states lowerCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : 
List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False ): with torch.no_grad(): lowerCAmelCase : List[str] = MaskFormerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Any = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() def comm_check_on_output(UpperCamelCase_ : Optional[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ ) lowerCAmelCase : List[Any] = model(UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) lowerCAmelCase : Any = model( pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Any = MaskFormerModelTester(self ) lowerCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) def lowerCamelCase__ 
( self : Optional[int] ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowerCamelCase__ ( self : Tuple ): pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowerCamelCase__ ( self : str ): pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowerCamelCase__ ( self : str ): pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowerCamelCase__ ( self : Tuple ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCamelCase__ ( self : int ): pass def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Optional[int] = [*signature.parameters.keys()] lowerCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : List[str] ): for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase : List[str] = MaskFormerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[str] = (self.model_tester.min_size,) * 2 lowerCAmelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=UpperCamelCase_ ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=UpperCamelCase_ ), '''class_labels''': torch.zeros(2 , 1_0 , device=UpperCamelCase_ ).long(), } lowerCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ ) lowerCAmelCase : str = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Dict = model_class(UpperCamelCase_ ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ ) self.assertTrue(outputs.attentions is not None ) def lowerCamelCase__ ( self : List[Any] ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase : Tuple = self.all_model_classes[1] lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ , 
mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss loss.backward() def lowerCamelCase__ ( self : List[str] ): # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase : Tuple = self.all_model_classes[1] lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ) lowerCAmelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase : Optional[int] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase : Tuple = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCamelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : Union[str, Any] = 1e-4 def _snake_case ( ): lowerCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : int ): return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = self.default_image_processor lowerCAmelCase : int = prepare_img() lowerCAmelCase : Dict = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) lowerCAmelCase : Dict = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ ) lowerCAmelCase : int = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(UpperCamelCase_ ) .eval() ) lowerCAmelCase : Union[str, Any] = self.default_image_processor lowerCAmelCase : Any = prepare_img() lowerCAmelCase : List[Any] = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase : str = model(**UpperCamelCase_ ) # masks_queries_logits lowerCAmelCase : int = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase : List[Any] = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] lowerCAmelCase : Union[str, Any] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) # class_queries_logits lowerCAmelCase : int = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase : Dict = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(UpperCamelCase_ ) .eval() ) lowerCAmelCase : Tuple = self.default_image_processor lowerCAmelCase : List[str] = prepare_img() lowerCAmelCase : int = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase : int = model(**UpperCamelCase_ ) # masks_queries_logits lowerCAmelCase : Any = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] lowerCAmelCase : Dict = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) # class_queries_logits lowerCAmelCase : Union[str, Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase : str = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Union[str, Any] = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(UpperCamelCase_ ) .eval() ) lowerCAmelCase : Optional[int] = self.default_image_processor lowerCAmelCase : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase : Union[str, Any] = inputs['''pixel_values'''].to(UpperCamelCase_ ) lowerCAmelCase : Any = [el.to(UpperCamelCase_ ) for el in inputs['''mask_labels''']] lowerCAmelCase : Union[str, Any] = [el.to(UpperCamelCase_ ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase : Tuple = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None )
314
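The MaskFormer integration tests in the cell above all follow the same golden-slice pattern: run the model, slice a corner of the output tensor, and compare it against hard-coded expected values. As a readability aid only, a minimal sketch of that check (the tensor values below are made up, not taken from the cell):

import torch

# Compare a 3x3 slice of model output against hard-coded expected values,
# within an absolute tolerance -- the pattern used throughout the tests above.
expected_slice = torch.tensor(
    [[-1.37, -1.77, -1.94], [-1.60, -1.99, -2.15], [-1.58, -1.93, -2.09]]
)
actual_slice = expected_slice + 1e-5  # stand-in for masks_queries_logits[0, 0, :3, :3]
assert torch.allclose(actual_slice, expected_slice, atol=1e-4)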
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
1
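The second cell of the row above is a flattened, identifier-obfuscated DDIM image pipeline. As a readability aid, here is a minimal de-obfuscated sketch of the same denoising loop; the unet/scheduler argument names are assumptions chosen for clarity, not identifiers from the cell:

import torch

@torch.no_grad()
def ddim_sample(unet, scheduler, batch_size=1, num_inference_steps=50, eta=0.0):
    # Start from Gaussian noise shaped like the UNet's sample space.
    shape = (batch_size, unet.config.in_channels,
             unet.config.sample_size, unet.config.sample_size)
    image = torch.randn(shape, dtype=unet.dtype)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        # 1. predict the noise residual
        noise_pred = unet(image, t).sample
        # 2. step x_t -> x_{t-1}; eta in [0, 1] interpolates DDIM -> DDPM variance
        image = scheduler.step(noise_pred, t, image, eta=eta).prev_sample
    # map from [-1, 1] to [0, 1] for display
    return (image / 2 + 0.5).clamp(0, 1)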
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase : int = False break # precondition assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool" return status def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase : Any = 0 # filters actual prime numbers. lowerCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase : Tuple = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_snake_case ): ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase : Dict = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_snake_case ): while quotient != 1: if is_prime(_snake_case ) and (quotient % factor == 0): ans.append(_snake_case ) quotient /= factor else: factor += 1 else: ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : Tuple ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : Optional[Any] = 0 # prime factorization of 'number' lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Any = max(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Dict ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : int = 0 # prime factorization of 'number' lowerCAmelCase : List[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Optional[int] = min(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0 , _snake_case ), "compare bust been from type bool" return number % 2 == 0 def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0 , _snake_case ), "compare bust been from type bool" return number % 2 != 0 def _snake_case ( _snake_case : Tuple ): assert ( isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case ) ), "'number' must been an int, even and > 2" lowerCAmelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case ) lowerCAmelCase : Optional[Any] = len(_snake_case ) # run variable for while-loops. lowerCAmelCase : List[str] = 0 lowerCAmelCase : Tuple = None # exit variable. for break up the loops lowerCAmelCase : str = True while i < len_pn and loop: lowerCAmelCase : str = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase : Dict = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and (len(_snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
lowerCAmelCase : Dict = 0 while numbera != 0: lowerCAmelCase : Union[str, Any] = numbera % numbera lowerCAmelCase : List[Any] = numbera lowerCAmelCase : List[Any] = rest # precondition assert isinstance(_snake_case , _snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase : List[str] = prime_factorization(_snake_case ) lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case ) elif numbera == 1 or numbera == 1: lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = max(_snake_case , _snake_case ) lowerCAmelCase : Dict = 0 lowerCAmelCase : int = 0 lowerCAmelCase : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case ) lowerCAmelCase : Any = prime_fac_a.count(_snake_case ) for _ in range(max(_snake_case , _snake_case ) ): ans *= n else: lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _snake_case ( _snake_case : Any ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_snake_case ): ans += 1 # precondition assert isinstance(_snake_case , _snake_case ) and is_prime( _snake_case ), "'ans' must been a prime number and from type int" return ans def _snake_case ( _snake_case : Any , _snake_case : Dict ): assert ( is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number lowerCAmelCase : str = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_snake_case ): number += 1 while number < p_number_a: ans.append(_snake_case ) number += 1 # fetch the next prime number. while not is_prime(_snake_case ): number += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and ans[0] != p_number_a and ans[len(_snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _snake_case ( _snake_case : List[Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase : Optional[Any] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_snake_case ) # precondition assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase : int = get_divisors(_snake_case ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (divisors[0] == 1) and (divisors[len(_snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _snake_case ( _snake_case : Optional[int] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase : Optional[Any] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase : Dict = 0 lowerCAmelCase : Dict = 1 lowerCAmelCase : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase : int = ans ans += fiba lowerCAmelCase : Optional[Any] = tmp return ans
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
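The first cell of the row above implements its prime sieve with a quadratic pairwise-divisibility filter. For clarity, a sketch of the classic Sieve of Eratosthenes that produces the same output; all names here are illustrative:

def sieve_of_eratosthenes(n: int) -> list[int]:
    # Same result as the pairwise-division sieve in the cell above,
    # but in the standard O(n log log n) formulation.
    is_composite = [False] * (n + 1)
    primes = []
    for candidate in range(2, n + 1):
        if not is_composite[candidate]:
            primes.append(candidate)
            for multiple in range(candidate * candidate, n + 1, candidate):
                is_composite[multiple] = True
    return primes

assert sieve_of_eratosthenes(10) == [2, 3, 5, 7]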
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case_( a__ ): def __init__( self : Tuple , UpperCamelCase_ : NestedDataStructureLike[PathLike] , UpperCamelCase_ : Optional[NamedSplit] = None , UpperCamelCase_ : Optional[Features] = None , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : List[Any] , ): super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : List[str] = field lowerCAmelCase : int = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} lowerCAmelCase : Dict = Json( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCamelCase__ ( self : str ): # Build iterable dataset if self.streaming: lowerCAmelCase : str = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowerCAmelCase : List[str] = None lowerCAmelCase : Tuple = None lowerCAmelCase : List[Any] = None lowerCAmelCase : Optional[Any] = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) lowerCAmelCase : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Dataset , UpperCamelCase_ : Union[PathLike, BinaryIO] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) lowerCAmelCase : List[Any] = dataset lowerCAmelCase : Optional[Any] = path_or_buf lowerCAmelCase : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE lowerCAmelCase : Union[str, Any] = num_proc lowerCAmelCase : Dict = '''utf-8''' lowerCAmelCase : Union[str, Any] = to_json_kwargs def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ ) lowerCAmelCase : str = self.to_json_kwargs.pop('''orient''' , '''records''' ) lowerCAmelCase : List[str] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) lowerCAmelCase : Dict = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) lowerCAmelCase : Optional[Any] = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer: lowerCAmelCase : 
Optional[int] = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' ''' was passed. Please provide a local path instead.''' ) lowerCAmelCase : List[str] = self._write( file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs ) return written def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = args lowerCAmelCase : Union[str, Any] = query_table( table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , ) lowerCAmelCase : int = batch.to_pandas().to_json( path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : BinaryIO , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any] , ): lowerCAmelCase : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): lowerCAmelCase : Optional[Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(UpperCamelCase_ ) else: lowerCAmelCase, lowerCAmelCase : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(UpperCamelCase_ ) return written
314
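The cell above wraps the JSON reader/writer internals of the datasets library. A small usage sketch of the public entry points those classes back (the file name is illustrative):

from datasets import Dataset

# Round-trip through JSON Lines: Dataset.to_json batches rows through
# pandas' to_json, and Dataset.from_json reads them back.
ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("data.jsonl", lines=True)  # one JSON object per line
ds2 = Dataset.from_json("data.jsonl")
assert sorted(ds2.column_names) == sorted(ds.column_names)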
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports snake_case__ : Optional[Any] = ''' import os ''' snake_case__ : Tuple = ''' def foo(): import os return False ''' snake_case__ : Any = ''' def foo(): def bar(): if True: import os return False return bar() ''' snake_case__ : Any = ''' import os try: import bar except ImportError: raise ValueError() ''' snake_case__ : int = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' snake_case__ : Any = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' snake_case__ : List[str] = ''' import os try: import bar except ImportError as e: raise ValueError() ''' snake_case__ : int = ''' import os try: import bar except: raise ValueError() ''' snake_case__ : List[Any] = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' snake_case__ : Optional[int] = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' snake_case__ : Any = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ): lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase : Tuple = get_imports(_snake_case ) assert parsed_imports == ["os"]
314
1
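The second cell of the row above parametrizes test cases for transformers.dynamic_module_utils.get_imports. A minimal standalone sketch of the behavior under test, using a temporary directory instead of pytest's fixture (the file name is an arbitrary choice):

import os
import tempfile

from transformers.dynamic_module_utils import get_imports

# Imports guarded by try/except ImportError are treated as optional and
# skipped, so only "os" is reported for this source.
source = "import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "test_file.py")
    with open(path, "w") as f:
        f.write(source)
    assert get_imports(path) == ["os"]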
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCAmelCase : Union[str, Any] = 6 lowerCAmelCase : Any = 128 lowerCAmelCase : List[Any] = (2, 2, 18, 2) lowerCAmelCase : Any = (4, 8, 16, 32) elif "large" in model_name: lowerCAmelCase : Tuple = 12 lowerCAmelCase : Dict = 192 lowerCAmelCase : List[str] = (2, 2, 18, 2) lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCAmelCase : Optional[int] = window_size lowerCAmelCase : Any = embed_dim lowerCAmelCase : Optional[Any] = depths lowerCAmelCase : int = num_heads return config def _snake_case ( _snake_case : Union[str, Any] ): if "encoder.mask_token" in name: lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCAmelCase : Tuple = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCAmelCase : str = '''layernorm.bias''' if "decoder" in name: pass else: lowerCAmelCase : Optional[Any] = '''swin.''' + name return name def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase : List[Any] = key.split('''.''' ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : Optional[Any] = int(key_split[4] ) lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase : Dict = val[:dim, :] lowerCAmelCase : Dict = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : str = val[ :dim ] lowerCAmelCase : List[str] = val[ dim : dim * 2 ] lowerCAmelCase : Optional[Any] = val[ -dim: ] else: lowerCAmelCase : str = val return orig_state_dict def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ): lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model'''] lowerCAmelCase : List[Any] = get_swin_config(_snake_case ) lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case ) model.eval() lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) lowerCAmelCase : str = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(f'''microsoft/{model_name}''' ) image_processor.push_to_hub(f'''microsoft/{model_name}''' ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Dict = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
314
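The conversion script in the cell above splits each fused attention qkv matrix into separate query/key/value slices along the first dimension. A sketch of that slicing with a made-up head size:

import torch

dim = 4                                 # all_head_size, illustrative
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection
q = qkv_weight[:dim, :]                 # first third  -> query
k = qkv_weight[dim:2 * dim, :]          # middle third -> key
v = qkv_weight[-dim:, :]                # last third   -> value
assert torch.equal(torch.cat([q, k, v]), qkv_weight)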
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ): super().__init__() lowerCAmelCase : Dict = initial_learning_rate lowerCAmelCase : List[str] = warmup_steps lowerCAmelCase : Union[str, Any] = power lowerCAmelCase : Dict = decay_schedule_fn lowerCAmelCase : str = name def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa ) lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa ) lowerCAmelCase : str = global_step_float / warmup_steps_float lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , ) def lowerCamelCase__ ( self : str ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ): lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , ) if num_warmup_steps: lowerCAmelCase : List[str] = WarmUp( initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , ) if weight_decay_rate > 0.0: lowerCAmelCase : Dict = AdamWeightDecay( learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , ) else: lowerCAmelCase : Any = tf.keras.optimizers.Adam( learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class snake_case_( a__ ): def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ): super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = weight_decay_rate lowerCAmelCase : List[str] = include_in_weight_decay lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay @classmethod def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp} return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) ) return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowerCAmelCase : Dict = apply_state or {} lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ): lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return 
super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return False return True class snake_case_( a__ ): def __init__( self : Any ): lowerCAmelCase : Any = [] lowerCAmelCase : List[str] = None @property def lowerCamelCase__ ( self : List[str] ): if self._accum_steps is None: lowerCAmelCase : Optional[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCamelCase__ ( self : Any ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ): if not self._gradients: lowerCAmelCase : Any = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(UpperCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(UpperCamelCase_ ) self._accum_steps.assign_add(1 ) def lowerCamelCase__ ( self : Union[str, Any] ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
314
1
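The second cell of the row above is the flattened TensorFlow optimization module from transformers (warmup schedule, optimizer factory, AdamWeightDecay, gradient accumulator). Upstream, the obfuscated top-level factory is create_optimizer; a usage sketch with illustrative hyperparameters (requires TensorFlow):

from transformers import create_optimizer

# Linear warmup for the first 1,000 steps, then polynomial decay to 0;
# weight decay skips LayerNorm and bias parameters by default.
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=1_000,
    weight_decay_rate=0.01,
)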
"""simple docstring""" def _snake_case ( _snake_case : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
314
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path snake_case__ : Union[str, Any] = '''src/transformers''' # Matches is_xxx_available() snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", snake_case__ : Union[str, Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], snake_case__ : Optional[Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: snake_case__ : Dict = re.compile(R'''^\s*try:''') # Catches a line with else: snake_case__ : int = re.compile(R'''^\s*else:''') def _snake_case ( _snake_case : Optional[Any] ): if _re_test_backend.search(_snake_case ) is None: return None lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : int = f.readlines() lowerCAmelCase : Tuple = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase : List[str] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0] lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase : int = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase : Optional[Any] = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase : Optional[Any] = lines[line_index] lowerCAmelCase : List[Any] = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase : List[str] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase : Any = lines[line_index] lowerCAmelCase : Tuple = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): def find_duplicates(_snake_case : Tuple ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase : Any = [] for key in import_dict_objects.keys(): lowerCAmelCase : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ): lowerCAmelCase : int = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' ) lowerCAmelCase : List[Any] = parse_init(_snake_case ) if objects is not None: lowerCAmelCase : Tuple = analyze_results(*_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) lowerCAmelCase : Any = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules snake_case__ : str = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase : Any = spec.loader.load_module() lowerCAmelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_snake_case ) > 0: lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
314
1
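The second cell of the row above is the repo-consistency checker that parses _import_structure blocks against TYPE_CHECKING imports. A simplified sketch of its backend-detection step, adapted from the regexes in that cell:

import re

# "if not is_xxx_available():" guards mark backend-specific objects; multiple
# guards on one line combine into a sorted, "_and_"-joined key.
_re_backend = re.compile(r"is_([a-z_]*)_available")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is_[a-z_]*_available\(\)")

def find_backend(line: str):
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("x = 1") is None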
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Any = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Union[str, Any] = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys snake_case__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
314
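The init files above (PLBart earlier, ViViT here) both use transformers' lazy-import pattern. A skeleton sketch of that pattern, assuming it runs inside a real package __init__.py: the module replaces itself in sys.modules so heavy submodules import only on first attribute access.

import sys
from transformers.utils import _LazyModule

# Map submodule name -> public objects; nothing heavy is imported yet.
_import_structure = {"configuration_vivit": ["VivitConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)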
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : List[str] ): lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase : Tuple = emb.weight.data return lin_layer def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ): lowerCAmelCase : Union[str, Any] = {} for old_key in state_dict.keys(): lowerCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCAmelCase : Tuple = state_dict[old_key] return new_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ): lowerCAmelCase : Optional[Any] = [] lowerCAmelCase : Tuple = 0 os.makedirs(_snake_case , exist_ok=_snake_case ) for expert in range(_snake_case ): lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(_snake_case ): lowerCAmelCase : List[str] = torch.load(_snake_case )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Any = os.path.join( _snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) torch.save(_snake_case , _snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_snake_case )[0]].dtype ) # Add the last block lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_snake_case ) == 1: lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) torch.save(_snake_case , _snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_snake_case , _snake_case ) # Otherwise, let's build the index lowerCAmelCase : Dict = {} for idx, shard in enumerate(_snake_case ): lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' ) lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) ) for key in shard: lowerCAmelCase : List[Any] = shard_file # Add the metadata lowerCAmelCase : Dict = {'''total_size''': total_size} lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n''' f.write(_snake_case ) return metadata, index if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ , snake_case__ : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) snake_case__ : str = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
314
1
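The second cell of the row above shards a fairseq NLLB-MoE checkpoint into per-expert files and writes a weight index. A sketch of the index layout it produces (sizes and tensor names below are made up for illustration):

import json

# The index file pairs a metadata block (total parameter bytes) with a map
# from each parameter name to the shard file that stores it.
index = {
    "metadata": {"total_size": 1_234_567_890},
    "weight_map": {
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
        "encoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
    },
}
print(json.dumps(index, indent=2, sort_keys=True))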
"""simple docstring""" def _snake_case ( _snake_case : int = 1000000 ): lowerCAmelCase : Optional[int] = limit + 1 lowerCAmelCase : int = [0] * limit for first_term in range(1 , _snake_case ): for n in range(_snake_case , _snake_case , _snake_case ): lowerCAmelCase : str = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a lowerCAmelCase : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f"""{solution() = }""")
314
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase : int = False break # precondition assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool" return status def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase : Any = 0 # filters actual prime numbers. lowerCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase : Tuple = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_snake_case ): ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase : Dict = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_snake_case ) # if 'number' is not prime then build the prime factorization of 'number' elif not is_prime(_snake_case ): while quotient != 1: if is_prime(_snake_case ) and (quotient % factor == 0): ans.append(_snake_case ) quotient /= factor else: factor += 1 else: ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must be from type list" return ans def _snake_case ( _snake_case : Tuple ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must be an int and >= 0" lowerCAmelCase : Optional[Any] = 0 # prime factorization of 'number' lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Any = max(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must be from type int" return ans def _snake_case ( _snake_case : Dict ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must be an int and >= 0" lowerCAmelCase : int = 0 # prime factorization of 'number' lowerCAmelCase : List[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Optional[int] = min(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must be from type int" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ), "'number' must be an int" assert isinstance(number % 2 == 0 , _snake_case ), "compare must be from type bool" return number % 2 == 0 def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ), "'number' must be an int" assert isinstance(number % 2 != 0 , _snake_case ), "compare must be from type bool" return number % 2 != 0 def _snake_case ( _snake_case : Tuple ): assert ( isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case ) ), "'number' must be an int, even and > 2" lowerCAmelCase : List[str] = [] # this list will be returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case ) lowerCAmelCase : Optional[Any] = len(_snake_case ) # run variable for while-loops. lowerCAmelCase : List[str] = 0 lowerCAmelCase : Tuple = None # exit variable, used to break out of the loops lowerCAmelCase : str = True while i < len_pn and loop: lowerCAmelCase : str = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase : Dict = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and (len(_snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contain two primes, and the sum of its elements must equal 'number'" return ans def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must be positive integers."
lowerCAmelCase : Dict = 0 while numbera != 0: lowerCAmelCase : Union[str, Any] = numbera % numbera lowerCAmelCase : List[Any] = numbera lowerCAmelCase : List[Any] = rest # precondition assert isinstance(_snake_case , _snake_case ) and ( numbera >= 0 ), "'number' must be from type int and positive" return numbera def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must be positive integers." lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be returned. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase : List[str] = prime_factorization(_snake_case ) lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case ) elif numbera == 1 or numbera == 1: lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = max(_snake_case , _snake_case ) lowerCAmelCase : Dict = 0 lowerCAmelCase : int = 0 lowerCAmelCase : Dict = [] # captured numbers in both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case ) lowerCAmelCase : Any = prime_fac_a.count(_snake_case ) for _ in range(max(_snake_case , _snake_case ) ): ans *= n else: lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ) and ( ans >= 0 ), "'ans' must be from type int and positive" return ans def _snake_case ( _snake_case : Any ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must be a positive int" lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans is not prime then # run to the next prime number. while not is_prime(_snake_case ): ans += 1 # precondition assert isinstance(_snake_case , _snake_case ) and is_prime( _snake_case ), "'ans' must be a prime number and from type int" return ans def _snake_case ( _snake_case : Any , _snake_case : Dict ): assert ( is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a) ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number lowerCAmelCase : str = [] # this list will be returned. # if number is not prime then # fetch the next prime number. while not is_prime(_snake_case ): number += 1 while number < p_number_a: ans.append(_snake_case ) number += 1 # fetch the next prime number. while not is_prime(_snake_case ): number += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and ans[0] != p_number_a and ans[len(_snake_case ) - 1] != p_number_a ), "'ans' must be a list without the arguments" # 'ans' contains neither 'pNumber1' nor 'pNumber2'! return ans def _snake_case ( _snake_case : List[Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must be an int and >= 1" lowerCAmelCase : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_snake_case ) # precondition assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisors(...)" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and ( number > 1 ), "'number' must be an int and > 1" lowerCAmelCase : int = get_divisors(_snake_case ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (divisors[0] == 1) and (divisors[len(_snake_case ) - 1] == number) ), "Error in help-function getDivisors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (denominator != 0) ), "The arguments must be from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _snake_case ( _snake_case : Optional[int] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must be an int and >= 0" lowerCAmelCase : Optional[Any] = 1 # this will be returned. for factor in range(1 , n + 1 ): ans *= factor return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must be an int and >= 0" lowerCAmelCase : Dict = 0 lowerCAmelCase : Dict = 1 lowerCAmelCase : Tuple = 1 # this will be returned for _ in range(n - 1 ): lowerCAmelCase : int = ans ans += fiba lowerCAmelCase : Optional[Any] = tmp return ans
314
1
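As a hedged sanity check on the arithmetic-progression solution above: with x = a + d, y = a, z = a - d (z > 0), the identity x^2 - y^2 - z^2 = a * (4d - a) = n is why the code walks over divisors a of n and derives d = (n / a + a) / 4. A brute-force count reproduces the values stated in Project Euler problem 135.

def brute_count(n: int) -> int:
    # Count pairs (a, d) with z = a - d > 0 satisfying the equation exactly.
    count = 0
    for a in range(1, n + 1):
        for d in range(1, a):  # a > d keeps z positive
            if (a + d) ** 2 - a * a - (a - d) ** 2 == n:
                count += 1
    return count

assert brute_count(27) == 2      # stated in the problem
assert brute_count(1155) == 10   # least n with exactly ten solutions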
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: snake_case__ : Optional[int] = None snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } snake_case__ : List[Any] = { '''facebook/mbart-large-en-ro''': 1_024, '''facebook/mbart-large-cc25''': 1_024, } # fmt: off snake_case__ : Any = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = MBartTokenizer __UpperCamelCase = [] __UpperCamelCase = [] def __init__( self : Optional[int] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : List[Any]="<unk>" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : Any="<mask>" , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Tuple , ): # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : int = vocab_file lowerCAmelCase : Optional[Any] = False if not self.vocab_file else True lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) lowerCAmelCase : Tuple = { lang_code: self.convert_tokens_to_ids(UpperCamelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCAmelCase : Dict = src_lang if src_lang is not None else '''en_XX''' lowerCAmelCase : Tuple = self.convert_tokens_to_ids(self._src_lang ) lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCamelCase__ ( self : str ): return self._src_lang @src_lang.setter def lowerCamelCase__ ( self : str , UpperCamelCase_ : str ): lowerCAmelCase : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Dict = [self.sep_token_id] lowerCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] , UpperCamelCase_ : Optional[str] , **UpperCamelCase_ : Tuple ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowerCAmelCase : Tuple = src_lang lowerCAmelCase : Optional[int] = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.convert_tokens_to_ids(UpperCamelCase_ ) lowerCAmelCase : int = tgt_lang_id return inputs def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : str = "en_XX" , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "ro_RO" , **UpperCamelCase_ : Optional[int] , ): lowerCAmelCase : int = src_lang lowerCAmelCase : List[Any] = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase__ ( self : Union[str, Any] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): lowerCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(UpperCamelCase_ ) lowerCAmelCase : Dict = [] lowerCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str ): lowerCAmelCase : Tuple = self.convert_tokens_to_ids(UpperCamelCase_ ) 
lowerCAmelCase : int = [] lowerCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return lowerCAmelCase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
314
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_( a__ ): __UpperCamelCase = '''vit_msn''' def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Tuple = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : Tuple = image_size lowerCAmelCase : List[str] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = qkv_bias
314
1
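A small illustration, with made-up token ids, of the special-token template the fast tokenizer above installs through TemplateProcessing: source sequences are wrapped as tokens </s> src_lang_code, i.e. an empty prefix and an [eos, lang_code] suffix.

EOS_ID = 2                                      # hypothetical id
LANG_CODE_ID = {"en_XX": 1004, "ro_RO": 1020}   # hypothetical ids

def wrap_source(ids: list[int], src_lang: str = "en_XX") -> list[int]:
    prefix: list[int] = []                      # MBart uses no prefix tokens
    suffix = [EOS_ID, LANG_CODE_ID[src_lang]]   # mirrors suffix_tokens above
    return prefix + ids + suffix

assert wrap_source([17, 42]) == [17, 42, 2, 1004]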
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : Dict = 16 snake_case__ : Any = 32 def _snake_case ( _snake_case : Accelerator , _snake_case : int = 16 ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase : Optional[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_snake_case , max_length=_snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase : Dict = datasets.map( _snake_case , batched=_snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case : str ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase : Optional[Any] = 8 else: lowerCAmelCase : Dict = None return tokenizer.pad( _snake_case , padding='''longest''' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case , drop_last=_snake_case ) lowerCAmelCase : Optional[int] = DataLoader( tokenized_datasets['''validation'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case , drop_last=(accelerator.mixed_precision == '''fp8''') , ) return train_dataloader, eval_dataloader def _snake_case ( _snake_case : List[str] , _snake_case : List[str] ): # Initialize accelerator lowerCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase : Any = config['''lr'''] lowerCAmelCase : List[str] = int(config['''num_epochs'''] ) lowerCAmelCase : Optional[Any] = int(config['''seed'''] ) lowerCAmelCase : Optional[Any] = int(config['''batch_size'''] ) lowerCAmelCase : Dict = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation lowerCAmelCase : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE set_seed(_snake_case ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = get_dataloaders(_snake_case , _snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase : Any = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase : Tuple = AdamW(params=model.parameters() , lr=_snake_case ) # Instantiate scheduler lowerCAmelCase : Tuple = get_linear_schedule_with_warmup( optimizer=_snake_case , num_warmup_steps=100 , num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = accelerator.prepare( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # Now we train the model for epoch in range(_snake_case ): model.train() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase : str = model(**_snake_case ) lowerCAmelCase : List[Any] = outputs.loss lowerCAmelCase : Dict = loss / gradient_accumulation_steps accelerator.backward(_snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase : Optional[int] = model(**_snake_case ) lowerCAmelCase : Any = outputs.logits.argmax(dim=-1 ) lowerCAmelCase, lowerCAmelCase : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_snake_case , references=_snake_case , ) lowerCAmelCase : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _snake_case ) def _snake_case ( ): lowerCAmelCase : Dict = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_snake_case , default=_snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCAmelCase : Optional[Any] = parser.parse_args() lowerCAmelCase : Optional[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_snake_case , _snake_case ) if __name__ == "__main__": main()
314
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case__ : Optional[Any] = logging.getLogger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case ) lowerCAmelCase : Optional[int] = { '''repo_id''': str(_snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(_snake_case , _snake_case , indent=4 ) def _snake_case ( _snake_case : Any ): if params.n_gpu <= 0: lowerCAmelCase : Dict = 0 lowerCAmelCase : Optional[int] = -1 lowerCAmelCase : Dict = True lowerCAmelCase : int = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] ) lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] ) lowerCAmelCase : int = int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node lowerCAmelCase : str = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = 1 lowerCAmelCase : Dict = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase : List[Any] = params.n_nodes > 1 # summary lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def _snake_case ( _snake_case : Optional[int] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
314
1
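A minimal sketch of the gradient-accumulation pattern in the training loop above: the loss is divided by the accumulation factor and the optimizer only steps every gradient_accumulation_steps micro-batches (note the script steps when step % gradient_accumulation_steps == 0, so it also fires on step 0).

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

for step in range(8):
    batch = torch.randn(3, 4)
    loss = model(batch).pow(2).mean()
    loss = loss / gradient_accumulation_steps  # scale so gradients average out
    loss.backward()                            # gradients accumulate across calls
    if step % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()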
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ): if partitions <= 0: raise ValueError('''partitions must be a positive number!''' ) if partitions > number_of_bytes: raise ValueError('''partitions can not > number_of_bytes!''' ) lowerCAmelCase : Tuple = number_of_bytes // partitions lowerCAmelCase : str = [] for i in range(_snake_case ): lowerCAmelCase : List[str] = i * bytes_per_partition + 1 lowerCAmelCase : str = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
314
"""simple docstring""" def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0''' raise ValueError(_snake_case ) else: lowerCAmelCase : str = sylvester(number - 1 ) lowerCAmelCase : Optional[Any] = num - 1 lowerCAmelCase : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
314
1
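A quick iterative check of the recurrence behind sylvester above: each term is s(n) = s(n-1)^2 - s(n-1) + 1 = (s(n-1) - 1) * s(n-1) + 1, which matches the lower * upper + 1 step in the recursive version.

def sylvester_iter(n: int) -> int:
    s = 2  # first term of Sylvester's sequence
    for _ in range(n - 1):
        s = (s - 1) * s + 1
    return s

assert [sylvester_iter(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]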
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : Optional[int] = { '''vocab_file''': '''vocab.txt''', '''merges_file''': '''bpe.codes''', } snake_case__ : Any = { '''vocab_file''': { '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''', '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''', }, '''merges_file''': { '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''', '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''', }, } snake_case__ : str = { '''vinai/phobert-base''': 256, '''vinai/phobert-large''': 256, } def _snake_case ( _snake_case : List[Any] ): lowerCAmelCase : int = set() lowerCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : Dict = char lowerCAmelCase : Union[str, Any] = set(_snake_case ) return pairs class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : Optional[int]="</s>" , UpperCamelCase_ : int="</s>" , UpperCamelCase_ : Any="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Dict="<pad>" , UpperCamelCase_ : str="<mask>" , **UpperCamelCase_ : Optional[Any] , ): super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = vocab_file lowerCAmelCase : Dict = merges_file lowerCAmelCase : List[str] = {} lowerCAmelCase : List[Any] = 0 lowerCAmelCase : str = 1 lowerCAmelCase : Any = 2 lowerCAmelCase : Optional[int] = 3 self.add_from_file(UpperCamelCase_ ) lowerCAmelCase : Any = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : List[Any] = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase : Any = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : Optional[Any] = {} def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : Union[str, Any] = [self.cls_token_id] lowerCAmelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : 
Optional[List[int]] = None ): lowerCAmelCase : Optional[int] = [self.sep_token_id] lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase__ ( self : Any ): return len(self.encoder ) def lowerCamelCase__ ( self : str ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[int] ): if token in self.cache: return self.cache[token] lowerCAmelCase : str = tuple(UpperCamelCase_ ) lowerCAmelCase : List[str] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase : Tuple = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCAmelCase : Optional[int] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase : Any = bigram lowerCAmelCase : Dict = [] lowerCAmelCase : Any = 0 while i < len(UpperCamelCase_ ): try: lowerCAmelCase : Optional[Any] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : Tuple = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : Dict = tuple(UpperCamelCase_ ) lowerCAmelCase : Dict = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCAmelCase : Optional[int] = get_pairs(UpperCamelCase_ ) lowerCAmelCase : Tuple = '''@@ '''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = word[:-4] lowerCAmelCase : int = word return word def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Tuple = [] lowerCAmelCase : Dict = re.findall(r'''\S+\n?''' , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) ) return split_tokens def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[int] ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(UpperCamelCase_ , self.unk_token ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Dict = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Union[str, Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.merges_file , UpperCamelCase_ ) return out_vocab_file, out_merge_file def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[int] ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): try: with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as fd: 
self.add_from_file(UpperCamelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' ) return lowerCAmelCase : List[Any] = f.readlines() for lineTmp in lines: lowerCAmelCase : Any = lineTmp.strip() lowerCAmelCase : List[Any] = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase : Dict = line[:idx] lowerCAmelCase : List[str] = len(self.encoder )
314
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCAmelCase : Union[str, Any] = 6 lowerCAmelCase : Any = 128 lowerCAmelCase : List[Any] = (2, 2, 18, 2) lowerCAmelCase : Any = (4, 8, 16, 32) elif "large" in model_name: lowerCAmelCase : Tuple = 12 lowerCAmelCase : Dict = 192 lowerCAmelCase : List[str] = (2, 2, 18, 2) lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCAmelCase : Optional[int] = window_size lowerCAmelCase : Any = embed_dim lowerCAmelCase : Optional[Any] = depths lowerCAmelCase : int = num_heads return config def _snake_case ( _snake_case : Union[str, Any] ): if "encoder.mask_token" in name: lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCAmelCase : Tuple = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCAmelCase : str = '''layernorm.bias''' if "decoder" in name: pass else: lowerCAmelCase : Optional[Any] = '''swin.''' + name return name def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase : List[Any] = key.split('''.''' ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : Optional[Any] = int(key_split[4] ) lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase : Dict = val[:dim, :] lowerCAmelCase : Dict = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : str = val[ :dim ] lowerCAmelCase : List[str] = val[ dim : dim * 2 ] lowerCAmelCase : Optional[Any] = val[ -dim: ] else: lowerCAmelCase : str = val return orig_state_dict def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ): lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model'''] lowerCAmelCase : List[Any] = get_swin_config(_snake_case ) lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case ) model.eval() lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) lowerCAmelCase : str = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(f'''microsoft/{model_name}''' ) image_processor.push_to_hub(f'''microsoft/{model_name}''' ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Dict = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
314
1
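A hedged sketch of the fused-qkv split that convert_state_dict performs above: the original checkpoint stores query, key and value as a single (3 * dim, dim) weight, while the Hugging Face model expects three separate (dim, dim) tensors.

import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)  # fused tensor from the checkpoint

query = qkv_weight[:dim, :]             # first dim rows
key = qkv_weight[dim : dim * 2, :]      # middle dim rows
value = qkv_weight[-dim:, :]            # last dim rows

assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)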
"""simple docstring""" def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any] ): lowerCAmelCase : Optional[int] = [1] for i in range(2 , _snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" lowerCAmelCase : List[str] = [] lowerCAmelCase : Dict = list(range(_snake_case ) ) # Find permutation while factorials: lowerCAmelCase : Optional[Any] = factorials.pop() lowerCAmelCase, lowerCAmelCase : str = divmod(_snake_case , _snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
314
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
1
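A restatement of the factorial-number-system decoding above with descriptive names, plus one worked case: for n = 3, the 0-indexed lexicographic rank k = 3 selects the permutation 120 from the order 012, 021, 102, 120, 201, 210.

def kth_permutation(k: int, n: int) -> list[int]:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    permutation, elements = [], list(range(n))
    while factorials:
        factorial = factorials.pop()
        index, k = divmod(k, factorial)      # digit in the factorial base
        permutation.append(elements.pop(index))
    permutation.append(elements[0])          # single remaining element
    return permutation

assert kth_permutation(3, 3) == [1, 2, 0]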
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Optional[Any]=3_7 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Tuple=5_1_2 , UpperCamelCase_ : str=1_6 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Union[str, Any]=None , ): lowerCAmelCase : List[Any] = parent lowerCAmelCase : List[Any] = batch_size lowerCAmelCase : Optional[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[str] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : List[str] = use_labels lowerCAmelCase : Optional[int] = vocab_size lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : str = num_attention_heads lowerCAmelCase : Union[str, Any] = intermediate_size lowerCAmelCase : Any = hidden_act lowerCAmelCase : Union[str, Any] = hidden_dropout_prob lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase : List[str] = max_position_embeddings lowerCAmelCase : Any = type_vocab_size lowerCAmelCase : List[str] = type_sequence_label_size lowerCAmelCase : int = initializer_range lowerCAmelCase : Dict = num_labels lowerCAmelCase : Any = num_choices lowerCAmelCase : Union[str, Any] = scope def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : List[Any] = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : str = None if self.use_token_type_ids: lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : Union[str, Any] = None lowerCAmelCase : List[str] = None lowerCAmelCase : Union[str, Any] = None if self.use_labels: lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : int ): return NystromformerConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[Any] = NystromformerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) lowerCAmelCase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[str] = NystromformerForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[str] = NystromformerForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[Any] = self.num_labels lowerCAmelCase : Optional[Any] = NystromformerForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = self.num_labels lowerCAmelCase : Dict = 
NystromformerForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = self.num_choices lowerCAmelCase : Tuple = NystromformerForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : str = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Any = config_and_inputs lowerCAmelCase : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Dict = NystromformerModelTester(self ) lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : Optional[int] = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Union[str, Any] = NystromformerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class snake_case_( unittest.TestCase ): @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowerCAmelCase : str = model(UpperCamelCase_ )[0] lowerCAmelCase : Any = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = torch.tensor( [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : int = '''the [MASK] of Belgium is Brussels''' lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : str = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : str = model(encoding.input_ids ).logits lowerCAmelCase : Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , '''capital''' )
314
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are 
decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
314
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class snake_case_( unittest.TestCase ): @property def lowerCamelCase__ ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = self.dummy_uncond_unet lowerCAmelCase : Dict = ScoreSdeVeScheduler() lowerCAmelCase : int = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) sde_ve.to(UpperCamelCase_ ) sde_ve.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase : int = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=UpperCamelCase_ ).images lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) lowerCAmelCase : List[str] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=UpperCamelCase_ , return_dict=UpperCamelCase_ )[ 0 ] lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Union[str, Any] = '''google/ncsnpp-church-256''' lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = ScoreSdeVeScheduler.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) sde_ve.to(UpperCamelCase_ ) sde_ve.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase : Any = sde_ve(num_inference_steps=1_0 , output_type='''numpy''' , generator=UpperCamelCase_ ).images lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) lowerCAmelCase : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
314
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
1
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class snake_case_( a__ ): __UpperCamelCase = 0 __UpperCamelCase = False __UpperCamelCase = 3.0 class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Any ): # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} ) self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} ) @require_cuda def lowerCamelCase__ ( self : Dict ): # If no defaults are changed, `to_kwargs` returns an empty dict. lowerCAmelCase : str = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 ) AcceleratorState._reset_state() lowerCAmelCase : Any = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) lowerCAmelCase : int = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_0_0_0 ) self.assertEqual(scaler._enabled , UpperCamelCase_ ) @require_multi_gpu def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Dict = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) snake_case__ : List[Any] = Accelerator(kwargs_handlers=[ddp_scaler]) snake_case__ : str = torch.nn.Linear(100, 200) snake_case__ : Optional[int] = accelerator.prepare(model) # Check the values changed in kwargs snake_case__ : Dict = '''''' snake_case__ : Any = model.bucket_bytes_cap // (1_024 * 1_024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
314
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ): lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0] lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ ) lowerCAmelCase : Any = nsfw_detected.flatten() lowerCAmelCase : Dict = nsfw_detected > p_threshold lowerCAmelCase : int = nsfw_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ): if nsfw_detected_: lowerCAmelCase : List[Any] = np.zeros(images[idx].shape ) lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = watermark_detected.flatten() lowerCAmelCase : Optional[int] = watermark_detected > w_threshold lowerCAmelCase : Union[str, Any] = watermark_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(UpperCamelCase_ ): if watermark_detected_: lowerCAmelCase : List[str] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
314
1
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) def _snake_case ( _snake_case : nn.ModuleList , _snake_case : nn.ModuleList , _snake_case : List[int] ): lowerCAmelCase : Any = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_snake_case ) == len(_snake_case ), f'''{len(_snake_case )} != {len(_snake_case )}''' dest_layers.load_state_dict(layers_to_copy.state_dict() ) snake_case__ : Dict = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } snake_case__ : str = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _snake_case ( _snake_case : Tuple , _snake_case : List[str] ): try: lowerCAmelCase : int = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first''' f''' {n_student}''' ) return list(range(_snake_case ) ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[str] ): if n_student > n_teacher: raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' ) elif n_teacher == n_student: return list(range(_snake_case ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _snake_case ( _snake_case : Union[str, PreTrainedModel] , _snake_case : Union[str, Path] = "student" , _snake_case : Union[int, None] = None , _snake_case : Union[int, None] = None , _snake_case : Optional[Any]=False , _snake_case : Dict=None , _snake_case : str=None , **_snake_case : Tuple , ): lowerCAmelCase : List[Any] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(_snake_case , _snake_case ): AutoTokenizer.from_pretrained(_snake_case ).save_pretrained(_snake_case ) # purely for convenience lowerCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).eval() else: assert isinstance(_snake_case , _snake_case ), f'''teacher must be a model or string got type {type(_snake_case )}''' lowerCAmelCase : Dict = teacher.config.to_diff_dict() try: lowerCAmelCase, lowerCAmelCase : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase : Dict = teacher_e if d is None: lowerCAmelCase : Union[str, Any] = teacher_d 
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config , '''num_encoder_layers''' ): lowerCAmelCase, lowerCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase, lowerCAmelCase : Dict = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase : List[str] = teacher_e if d is None: lowerCAmelCase : int = teacher_d if hasattr(teacher.config , '''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_snake_case ) # Copy weights lowerCAmelCase : Union[str, Any] = teacher.config_class(**_snake_case ) lowerCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_config(_snake_case ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase : Tuple = student.load_state_dict(teacher.state_dict() , strict=_snake_case ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase, lowerCAmelCase : Any = list(range(_snake_case ) ), list(range(_snake_case ) ) logger.info( f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to''' f''' {save_path}''' ) student.save_pretrained(_snake_case ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase : List[int] = pick_layers_to_copy(_snake_case , _snake_case ) if d_layers_to_copy is None: lowerCAmelCase : List[int] = pick_layers_to_copy(_snake_case , _snake_case ) try: if hasattr( _snake_case , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _snake_case ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _snake_case ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _snake_case ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _snake_case ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _snake_case ) copy_layers(teacher.decoder.block , student.decoder.block , _snake_case ) logger.info( f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' ) lowerCAmelCase : Any = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(_snake_case ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
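# Quick illustration of the layer-picking logic above, using the hardcoded
# LAYERS_TO_COPY table. The import assumes this file is saved as `make_student.py`;
# that module name is an assumption, not given by the source.
from make_student import pick_layers_to_copy

# a 12-layer teacher distilled to 3 layers keeps the first, middle and last layers
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
# combinations missing from the table fall back (with a warning) to the first n layers
assert pick_layers_to_copy(n_student=5, n_teacher=12) == [0, 1, 2, 3, 4]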
314
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
314
1
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
314
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) snake_case__ : Dict = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = ['''CLIPFeatureExtractor'''] snake_case__ : List[Any] = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, 
module_spec=__spec__)
314
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
1
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = IFImgaImgSuperResolutionPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) __UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowerCamelCase__ ( self : int ): return self._get_superresolution_dummy_components() def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any]=0 ): if str(UpperCamelCase_ ).startswith('''mps''' ): lowerCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_ ) else: lowerCAmelCase : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCAmelCase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowerCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowerCAmelCase : str = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase__ ( self : Tuple ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def lowerCamelCase__ ( self : Tuple ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def lowerCamelCase__ ( self : List[str] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCamelCase__ ( self : List[str] ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): self._test_save_load_local() def lowerCamelCase__ ( self : Union[str, Any] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor'''] snake_case__ : List[Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar snake_case__ : str = TypeVar('''T''') class snake_case_( Generic[T] ): __UpperCamelCase = 42 # Cache store of keys __UpperCamelCase = 42 # References of the keys in cache __UpperCamelCase = 10 # Maximum capacity of cache def __init__( self : Union[str, Any] , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = deque() lowerCAmelCase : Optional[Any] = set() if not n: lowerCAmelCase : Tuple = sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: lowerCAmelCase : Union[str, Any] = n def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : T ): if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowerCAmelCase : Tuple = self.dq_store.pop() self.key_reference.remove(UpperCamelCase_ ) else: self.dq_store.remove(UpperCamelCase_ ) self.dq_store.appendleft(UpperCamelCase_ ) self.key_reference.add(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): for k in self.dq_store: print(UpperCamelCase_ ) def __repr__( self : Union[str, Any] ): return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : LRUCache[str | int] = LRUCache(4) lru_cache.refer('''A''') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('''A''') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
314
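# Hedged aside on the LRUCache above: the deque + set pair gives O(1)
# membership checks, but a cache hit pays O(n) for deque.remove(). An
# OrderedDict keeps both paths O(1); this alternative is a sketch, not the
# dataset's code.
from collections import OrderedDict


class SimpleLRU:
    def __init__(self, capacity: int = 10) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # mark as most recently used
        elif len(self.store) >= self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = True


cache = SimpleLRU(capacity=4)
for key in ("A", 2, 3, "A", 4, 5):
    cache.refer(key)
print(list(cache.store))  # [3, 'A', 4, 5] -- same working set as the demo, oldest first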
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
314
1
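# Worked example of the shortest-edge resize arithmetic in ResizeShortestEdge
# above: the scale is size / min(h, w), and both sides shrink again if the
# longer one would exceed max_size. Plain-Python sketch with hypothetical sizes.
def resize_shortest_edge(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        shrink = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * shrink, neww * shrink
    return int(newh + 0.5), int(neww + 0.5)  # round half up, as in the file


print(resize_shortest_edge(480, 640, size=800, max_size=1333))   # (800, 1067)
print(resize_shortest_edge(480, 1920, size=800, max_size=1333))  # (333, 1333), capped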
"""simple docstring""" def _snake_case ( _snake_case : int = 4000000 ): lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase, lowerCAmelCase : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_snake_case ) lowerCAmelCase, lowerCAmelCase : List[Any] = b, a + b return sum(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
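# Side note on the even-Fibonacci sum above: every third Fibonacci number is
# even, and the even terms obey E(k) = 4*E(k-1) + E(k-2) with E(1)=2, E(2)=8,
# so parity never needs testing. Hedged sketch:
def even_fib_sum(limit: int = 4000000) -> int:
    total, prev, curr = 0, 2, 8  # the first two even Fibonacci numbers
    while prev <= limit:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total


print(even_fib_sum())  # 4613732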
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
314
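# Toy illustration of the "##" sub-word marking that add_sub_symbol performs
# above: characters continuing a known whole word get a "##" prefix so the
# whole-word-masking step treats the word as one unit. Inputs are hypothetical
# and the Chinese-character checks of the real helper are omitted.
def mark_subwords(tokens, words):
    out, i = list(tokens), 0
    while i < len(out):
        matched = False
        for j in range(len(out), i + 1, -1):  # try the longest match first
            if "".join(out[i:j]) in words:
                for k in range(i + 1, j):
                    out[k] = "##" + out[k]
                i, matched = j, True
                break
        if not matched:
            i += 1
    return out


print(mark_subwords(["身", "高", "一", "米"], {"身高"}))  # ['身', '##高', '一', '米']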
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Dict = logging.get_logger(__name__) def _snake_case ( _snake_case : Optional[int] , _snake_case : Any=False ): lowerCAmelCase : Union[str, Any] = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', 
f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase : Dict = '''''' else: lowerCAmelCase : List[str] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCAmelCase : str = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase : Union[str, Any] = in_proj_bias[: config.hidden_size] lowerCAmelCase : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase : str = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowerCAmelCase : str = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :] def _snake_case ( _snake_case : str ): lowerCAmelCase : Optional[int] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : int ): lowerCAmelCase : Dict = dct.pop(_snake_case ) lowerCAmelCase : List[str] = val def _snake_case ( ): lowerCAmelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Tuple = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Union[str, Any]=False ): lowerCAmelCase : Optional[Any] = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_snake_case , ) lowerCAmelCase : Union[str, Any] = ViTHybridConfig(backbone_config=_snake_case , image_size=384 , num_labels=1000 ) lowerCAmelCase : Any = False # load original model from timm lowerCAmelCase : int = timm.create_model(_snake_case , pretrained=_snake_case ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCAmelCase : Dict = timm_model.state_dict() if base_model: remove_classification_head_(_snake_case ) lowerCAmelCase : Union[str, Any] = create_rename_keys(_snake_case , _snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) read_in_q_k_v(_snake_case , _snake_case , _snake_case ) lowerCAmelCase : Tuple = '''huggingface/label-files''' lowerCAmelCase : Tuple = '''imagenet-1k-id2label.json''' lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase : Tuple = {int(_snake_case ): v for k, v in idalabel.items()} lowerCAmelCase : Tuple = idalabel lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowerCAmelCase : int = ViTHybridModel(_snake_case ).eval() else: lowerCAmelCase : Optional[Any] = ViTHybridForImageClassification(_snake_case ).eval() model.load_state_dict(_snake_case ) # create image processor lowerCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=_snake_case ) ) lowerCAmelCase : Tuple = transform.transforms lowerCAmelCase : Tuple = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowerCAmelCase : Optional[int] = ViTHybridImageProcessor( do_resize=_snake_case , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_snake_case , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCAmelCase : List[str] = prepare_img() lowerCAmelCase : Optional[Any] = transform(_snake_case ).unsqueeze(0 ) lowerCAmelCase : List[str] = processor(_snake_case , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(_snake_case , _snake_case ) # verify logits with torch.no_grad(): lowerCAmelCase : List[str] = model(_snake_case ) lowerCAmelCase : Optional[Any] = 
outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if base_model: lowerCAmelCase : Optional[int] = timm_model.forward_features(_snake_case ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_snake_case , outputs.pooler_output , atol=1E-3 ) else: lowerCAmelCase : int = timm_model(_snake_case ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) snake_case__ : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
314
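# Minimal numpy sketch of the fused-QKV split performed by read_in_q_k_v
# above: timm stores the attention input projection as one (3 * hidden,
# hidden) matrix that is sliced into query, key and value blocks. The hidden
# size of 4 is hypothetical.
import numpy as np

hidden = 4
in_proj_weight = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)

query_w = in_proj_weight[:hidden, :]
key_w = in_proj_weight[hidden : 2 * hidden, :]
value_w = in_proj_weight[-hidden:, :]

assert query_w.shape == key_w.shape == value_w.shape == (hidden, hidden)
print(query_w[0], key_w[0], value_w[0])  # rows 0, 4 and 8 of the fused matrix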
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Dict = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : int = 0 lowerCAmelCase : Dict = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Union[str, Any] = 0 # compute the shape of the output matrix lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : int = 0 lowerCAmelCase : Tuple = 0 return updated_arr def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Union[str, Any] = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 # compute the shape of the output matrix lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : str = 0 lowerCAmelCase : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image snake_case__ : Optional[Any] = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
314
1
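# Quick check of the pooling-shape formula used by both functions above:
# out = (n - size) // stride + 1. A 4x4 input with size=2, stride=2 pools to
# 2x2; the loop below mirrors the sliding-window logic on toy data.
import numpy as np

arr = np.arange(16).reshape(4, 4)
size, stride = 2, 2
out = (arr.shape[0] - size) // stride + 1
pooled = np.zeros((out, out))
for mi, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
    for mj, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
        pooled[mi, mj] = arr[i : i + size, j : j + size].max()
print(pooled)  # [[ 5.  7.] [13. 15.]]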
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) snake_case__ : Any = '''hf-internal-testing/tiny-random-bert''' snake_case__ : List[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') snake_case__ : Optional[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : str ): lowerCAmelCase : Any = cached_file(UpperCamelCase_ , UpperCamelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCamelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ) ) with open(os.path.join(UpperCamelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase : Union[str, Any] = f.read() self.assertEqual(UpperCamelCase_ , os.path.join(UpperCamelCase_ , '''snapshots''' , UpperCamelCase_ , UpperCamelCase_ ) ) self.assertTrue(os.path.isfile(UpperCamelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase : Union[str, Any] = cached_file(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase : List[Any] = cached_file(UpperCamelCase_ , UpperCamelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCamelCase_ , os.path.join(UpperCamelCase_ , '''snapshots''' , UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex(UpperCamelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase : List[Any] = cached_file('''tiny-random-bert''' , UpperCamelCase_ ) with self.assertRaisesRegex(UpperCamelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase : List[str] = cached_file(UpperCamelCase_ , UpperCamelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCamelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase : List[Any] = cached_file(UpperCamelCase_ , '''conf''' ) def lowerCamelCase__ ( self : Tuple ): with self.assertRaisesRegex(UpperCamelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase : Any = cached_file(UpperCamelCase_ , '''conf''' ) with open(os.path.join(UpperCamelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase : List[str] = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase_ , '''.no_exist''' , UpperCamelCase_ , '''conf''' ) ) ) lowerCAmelCase : Union[str, Any] = cached_file(UpperCamelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) lowerCAmelCase : Dict = cached_file(UpperCamelCase_ , '''conf''' , local_files_only=UpperCamelCase_ , _raise_exceptions_for_missing_entries=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) lowerCAmelCase : str = mock.Mock() lowerCAmelCase : int = 5_0_0 lowerCAmelCase : Dict = {} lowerCAmelCase : List[str] = HTTPError lowerCAmelCase : Dict = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase_ ) as mock_head: lowerCAmelCase : Any = cached_file(UpperCamelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase__ ( self : str ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCamelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCamelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCamelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCamelCase_ , revision='''ahaha''' ) lowerCAmelCase : int = get_file_from_repo('''bert-base-cased''' , UpperCamelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase : Optional[Any] = json.loads(open(UpperCamelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_6_8 ) def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Dict = Path(UpperCamelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCamelCase_ , '''a.txt''' ) , str(UpperCamelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCamelCase_ , '''b.txt''' ) )
314
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
1
"""simple docstring""" from __future__ import annotations from statistics import mean def _snake_case ( _snake_case : list[int] , _snake_case : list[int] , _snake_case : int ): lowerCAmelCase : List[Any] = [0] * no_of_processes lowerCAmelCase : Dict = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_snake_case ): lowerCAmelCase : List[str] = burst_time[i] lowerCAmelCase : list[int] = [] lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : int = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowerCAmelCase : Dict = [] lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : str = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowerCAmelCase : Dict = i total_time += burst_time[target_process] completed += 1 lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Tuple = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def _snake_case ( _snake_case : list[int] , _snake_case : int , _snake_case : list[int] ): lowerCAmelCase : str = [0] * no_of_processes for i in range(_snake_case ): lowerCAmelCase : str = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('''[TEST CASE 01]''') snake_case__ : Optional[Any] = 4 snake_case__ : Union[str, Any] = [2, 5, 3, 7] snake_case__ : Dict = [0, 0, 0, 0] snake_case__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes) snake_case__ : str = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''') for i, process_id in enumerate(list(range(1, 5))): print( f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
314
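# Sanity check of the waiting-time identity used above for non-preemptive SJF
# with equal arrival times: waiting = completion - arrival - burst, i.e. each
# job waits exactly for everything scheduled before it. For the file's first
# test case (bursts 2, 5, 3, 7; all arrivals 0) the order is P1, P3, P2, P4.
bursts = [2, 5, 3, 7]  # P1..P4, all arriving at time 0
order = sorted(range(len(bursts)), key=lambda i: bursts[i])  # shortest first
clock, waiting = 0, [0] * len(bursts)
for i in order:
    waiting[i] = clock  # time spent waiting before the job starts
    clock += bursts[i]
print(waiting)                      # [0, 5, 2, 10]
print(sum(waiting) / len(waiting))  # average waiting time 4.25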
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int ): lowerCAmelCase : Optional[int] = [True] * limit lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : Tuple = False lowerCAmelCase : int = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCAmelCase : Union[str, Any] = i * 2 while index < limit: lowerCAmelCase : List[str] = False lowerCAmelCase : int = index + i lowerCAmelCase : Tuple = [2] for i in range(3 , _snake_case , 2 ): if is_prime[i]: primes.append(_snake_case ) return primes def _snake_case ( _snake_case : int = 1000000 ): lowerCAmelCase : Optional[int] = prime_sieve(_snake_case ) lowerCAmelCase : Any = 0 lowerCAmelCase : Optional[Any] = 0 for i in range(len(_snake_case ) ): for j in range(i + length , len(_snake_case ) ): lowerCAmelCase : Optional[int] = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCAmelCase : Tuple = j - i lowerCAmelCase : List[Any] = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
314
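# Aside on solution() above: the repeated slice sums can be replaced by one
# prefix-sum pass, so each window sum is prefix[j] - prefix[i]. Hedged sketch
# on a small ceiling; for primes below 100 the answer is 41 = 2+3+5+7+11+13.
from itertools import accumulate


def longest_prime_sum(primes, ceiling):
    prime_set = set(primes)
    prefix = [0] + list(accumulate(primes))
    best_len, best = 0, 0
    for i in range(len(primes)):
        for j in range(i + best_len + 1, len(primes) + 1):  # longer runs only
            window = prefix[j] - prefix[i]
            if window >= ceiling:
                break
            if window in prime_set:
                best_len, best = j - i, window
    return best


primes = [p for p in range(2, 100) if all(p % d for d in range(2, int(p**0.5) + 1))]
print(longest_prime_sum(primes, 100))  # 41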
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports snake_case__ : Optional[Any] = ''' import os ''' snake_case__ : Tuple = ''' def foo(): import os return False ''' snake_case__ : Any = ''' def foo(): def bar(): if True: import os return False return bar() ''' snake_case__ : Any = ''' import os try: import bar except ImportError: raise ValueError() ''' snake_case__ : int = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' snake_case__ : Any = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' snake_case__ : List[str] = ''' import os try: import bar except ImportError as e: raise ValueError() ''' snake_case__ : int = ''' import os try: import bar except: raise ValueError() ''' snake_case__ : List[Any] = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' snake_case__ : Optional[int] = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' snake_case__ : Any = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ): lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase : Tuple = get_imports(_snake_case ) assert parsed_imports == ["os"]
314
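# The get_imports helper exercised above is regex-based inside transformers;
# an ast-based sketch of the same idea is below. Unlike the real helper it
# does not skip imports guarded by try/except, so treat it as illustrative.
import ast


def top_level_modules(source: str) -> list:
    modules = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            modules.update(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            modules.add(node.module.split(".")[0])
    return sorted(modules)


print(top_level_modules("import os\nfrom pathlib import Path"))  # ['os', 'pathlib']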
1
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class snake_case_( a__ ): __UpperCamelCase = '''Speech2TextFeatureExtractor''' __UpperCamelCase = '''Speech2TextTokenizer''' def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ): super().__init__(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.feature_extractor lowerCAmelCase : List[Any] = False def __call__( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Dict ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) lowerCAmelCase : Optional[int] = kwargs.pop('''raw_speech''' ) else: lowerCAmelCase : Optional[Any] = kwargs.pop('''audio''' , UpperCamelCase_ ) lowerCAmelCase : Any = kwargs.pop('''sampling_rate''' , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = kwargs.pop('''text''' , UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: lowerCAmelCase : Dict = args[0] lowerCAmelCase : Dict = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: lowerCAmelCase : List[str] = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ ) if text is not None: lowerCAmelCase : Any = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ ) if text is None: return inputs elif audio is None: return encodings else: lowerCAmelCase : Tuple = encodings['''input_ids'''] return inputs def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : str , **UpperCamelCase_ : Tuple ): return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[Any] ): return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) @contextmanager def lowerCamelCase__ ( self : Tuple ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) lowerCAmelCase : List[Any] = True lowerCAmelCase : Dict = self.tokenizer yield lowerCAmelCase : Optional[Any] = self.feature_extractor lowerCAmelCase : Optional[Any] = False
314
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ): super().__init__() lowerCAmelCase : Dict = initial_learning_rate lowerCAmelCase : List[str] = warmup_steps lowerCAmelCase : Union[str, Any] = power lowerCAmelCase : Dict = decay_schedule_fn lowerCAmelCase : str = name def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa ) lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa ) lowerCAmelCase : str = global_step_float / warmup_steps_float lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , ) def lowerCamelCase__ ( self : str ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ): lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , ) if num_warmup_steps: lowerCAmelCase : List[str] = WarmUp( initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , ) if weight_decay_rate > 0.0: lowerCAmelCase : Dict = AdamWeightDecay( learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , ) else: lowerCAmelCase : Any = tf.keras.optimizers.Adam( learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class snake_case_( a__ ): def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ): super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = weight_decay_rate lowerCAmelCase : List[str] = include_in_weight_decay lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay @classmethod def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp} return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) ) return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowerCAmelCase : Dict = apply_state or {} lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ): lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ ) lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with tf.control_dependencies([decay] ): return 
super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None: return False return True class snake_case_( a__ ): def __init__( self : Any ): lowerCAmelCase : Any = [] lowerCAmelCase : List[str] = None @property def lowerCamelCase__ ( self : List[str] ): if self._accum_steps is None: lowerCAmelCase : Optional[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCamelCase__ ( self : Any ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ): if not self._gradients: lowerCAmelCase : Any = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(UpperCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(UpperCamelCase_ ) self._accum_steps.assign_add(1 ) def lowerCamelCase__ ( self : Union[str, Any] ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
314
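# Plain-Python check of the WarmUp schedule above: below warmup_steps the
# rate ramps as init_lr * (step / warmup_steps) ** power, after which the
# wrapped decay schedule takes over (held constant here for illustration).
def warmup_lr(step: int, init_lr: float, warmup_steps: int, power: float = 1.0) -> float:
    if step < warmup_steps:
        return init_lr * (step / warmup_steps) ** power
    return init_lr  # stand-in for decay_schedule_fn(step - warmup_steps)


for step in (0, 50, 100, 200):
    print(step, warmup_lr(step, init_lr=1e-3, warmup_steps=100))
# 0 -> 0.0, 50 -> 0.0005, 100 and 200 -> 0.001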
1
"""simple docstring""" def _snake_case ( _snake_case : int ): if n == 1 or not isinstance(_snake_case , _snake_case ): return 0 elif n == 2: return 1 else: lowerCAmelCase : Optional[Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _snake_case ( _snake_case : int ): lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : List[str] = 2 while digits < n: index += 1 lowerCAmelCase : Tuple = len(str(fibonacci(_snake_case ) ) ) return index def _snake_case ( _snake_case : int = 1000 ): return fibonacci_digits_index(_snake_case ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
314
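# The digit count computed above can also be estimated in closed form from
# Binet's formula: for i >= 2, digits(F(i)) = floor(i*log10(phi) - log10(5)/2) + 1,
# which finds the 1000-digit index without any big-integer arithmetic.
import math

PHI = (1 + 5**0.5) / 2


def fib_digits(i: int) -> int:
    return math.floor(i * math.log10(PHI) - math.log10(5) / 2) + 1


index = 2
while fib_digits(index) < 1000:
    index += 1
print(index)  # 4782, matching fibonacci_digits_index(1000)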
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path snake_case__ : Union[str, Any] = '''src/transformers''' # Matches is_xxx_available() snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", snake_case__ : Union[str, Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], snake_case__ : Optional[Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: snake_case__ : Dict = re.compile(R'''^\s*try:''') # Catches a line with else: snake_case__ : int = re.compile(R'''^\s*else:''') def _snake_case ( _snake_case : Optional[Any] ): if _re_test_backend.search(_snake_case ) is None: return None lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : int = f.readlines() lowerCAmelCase : Tuple = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase : List[str] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0] lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase : int = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase : Optional[Any] = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase : Optional[Any] = lines[line_index] lowerCAmelCase : List[Any] = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase : List[str] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase : Any = lines[line_index] lowerCAmelCase : Tuple = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): def find_duplicates(_snake_case : Tuple ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase : Any = [] for key in import_dict_objects.keys(): lowerCAmelCase : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ): lowerCAmelCase : int = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' ) lowerCAmelCase : List[Any] = parse_init(_snake_case ) if objects is not None: lowerCAmelCase : Tuple = analyze_results(*_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) lowerCAmelCase : Any = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules snake_case__ : str = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase : Any = spec.loader.load_module() lowerCAmelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_snake_case ) > 0: lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
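# Quick illustration (added here, not part of the checker): how the key/value
# pattern above tokenizes one _import_structure line. Standard library only.
import re

demo_re = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')  # same pattern as _re_import_struct_key_value
demo_line = '    "tokenization_bert": ["BasicTokenizer", "BertTokenizer"],'
match = demo_re.search(demo_line)
if match is not None:
    objects = [obj[1:-1] for obj in match.groups()[0].split(", ")]
    print(objects)  # -> ['BasicTokenizer', 'BertTokenizer']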
314
1
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = ProphetNetTokenizer __UpperCamelCase = False def lowerCamelCase__ ( self : str ): super().setUp() lowerCAmelCase : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[int] = '''UNwant\u00E9d,running''' lowerCAmelCase : Any = '''unwanted, running''' return input_text, output_text def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = self.tokenizer_class(self.vocab_file ) lowerCAmelCase : int = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : int = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : str = BasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] lowerCAmelCase : List[str] = {} for i, token in enumerate(UpperCamelCase_ ): lowerCAmelCase : Tuple = i lowerCAmelCase : int = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) lowerCAmelCase : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCAmelCase : Optional[Any] = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2] lowerCAmelCase : List[str] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def lowerCamelCase__ ( self : List[Any] ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def lowerCamelCase__ ( self : List[Any] ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def lowerCamelCase__ ( self : Optional[int] ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) lowerCAmelCase : int = 
tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) assert encoded_sentence == text + [1_0_2] assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
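# Minimal usage sketch to accompany the tests above (requires network access to the
# Hub; the checkpoint id and the SEP id 102 come straight from this test file).
from transformers import ProphetNetTokenizer

tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
batch = tokenizer(["A long paragraph for summarization."], padding=True, return_tensors="pt")
print(batch.input_ids[0, -1].item())  # -> 102, the [SEP] id appended by build_inputs_with_special_tokens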
314
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : List[str] ): lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase : Tuple = emb.weight.data return lin_layer def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ): lowerCAmelCase : Union[str, Any] = {} for old_key in state_dict.keys(): lowerCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCAmelCase : Tuple = state_dict[old_key] return new_dict def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ): lowerCAmelCase : Optional[Any] = [] lowerCAmelCase : Tuple = 0 os.makedirs(_snake_case , exist_ok=_snake_case ) for expert in range(_snake_case ): lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(_snake_case ): lowerCAmelCase : List[str] = torch.load(_snake_case )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Any = os.path.join( _snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) torch.save(_snake_case , _snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_snake_case )[0]].dtype ) # Add the last block lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) ) lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case ) lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_snake_case ) == 1: lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case ) torch.save(_snake_case , _snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_snake_case , _snake_case ) # Otherwise, let's build the index lowerCAmelCase : Dict = {} for idx, shard in enumerate(_snake_case ): lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' ) lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) ) for key in shard: lowerCAmelCase : List[Any] = shard_file # Add the metadata lowerCAmelCase : Dict = {'''total_size''': total_size} lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n''' f.write(_snake_case ) return metadata, index if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ , snake_case__ : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) snake_case__ : str = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
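# Sketch (not part of the conversion script): after shard_on_the_fly has written its
# shards, the generated index maps every parameter name to its shard file and can be
# inspected with the standard library alone. The folder path is hypothetical.
import json
import os

from transformers.utils import WEIGHTS_INDEX_NAME

dump_folder = "/path/to/hf-converted-moe"
with open(os.path.join(dump_folder, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
    index = json.load(f)
print(index["metadata"]["total_size"])        # the byte count accumulated above
print(list(index["weight_map"].items())[:3])  # first few (parameter, shard file) pairs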
314
1
"""simple docstring""" from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING snake_case__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class snake_case_( a__ ): def __init__( self : str , *UpperCamelCase_ : str , **UpperCamelCase_ : List[Any] ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) requires_backends(self , '''decord''' ) self.check_model_type(UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None ): lowerCAmelCase : str = {} if frame_sampling_rate is not None: lowerCAmelCase : Union[str, Any] = frame_sampling_rate if num_frames is not None: lowerCAmelCase : Optional[Any] = num_frames lowerCAmelCase : List[str] = {} if top_k is not None: lowerCAmelCase : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : Optional[int] , UpperCamelCase_ : Union[str, List[str]] , **UpperCamelCase_ : str ): return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=1 ): if num_frames is None: lowerCAmelCase : str = self.model.config.num_frames if video.startswith('''http://''' ) or video.startswith('''https://''' ): lowerCAmelCase : Any = BytesIO(requests.get(UpperCamelCase_ ).content ) lowerCAmelCase : Optional[Any] = VideoReader(UpperCamelCase_ ) videoreader.seek(0 ) lowerCAmelCase : List[str] = 0 lowerCAmelCase : int = num_frames * frame_sampling_rate - 1 lowerCAmelCase : Optional[Any] = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa ) lowerCAmelCase : Any = videoreader.get_batch(UpperCamelCase_ ).asnumpy() lowerCAmelCase : str = list(UpperCamelCase_ ) lowerCAmelCase : Tuple = self.image_processor(UpperCamelCase_ , return_tensors=self.framework ) return model_inputs def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] ): lowerCAmelCase : Union[str, Any] = self.model(**UpperCamelCase_ ) return model_outputs def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=5 ): if top_k > self.model.config.num_labels: lowerCAmelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase : Union[str, Any] = model_outputs.logits.softmax(-1 )[0] lowerCAmelCase, lowerCAmelCase : str = probs.topk(UpperCamelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowerCAmelCase : int = scores.tolist() lowerCAmelCase : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
314
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase : int = False break # precondition assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool" return status def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase : Any = 0 # filters actual prime numbers. lowerCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase : Tuple = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_snake_case ): ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase : Dict = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_snake_case ): while quotient != 1: if is_prime(_snake_case ) and (quotient % factor == 0): ans.append(_snake_case ) quotient /= factor else: factor += 1 else: ans.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list" return ans def _snake_case ( _snake_case : Tuple ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : Optional[Any] = 0 # prime factorization of 'number' lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Any = max(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Dict ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase : int = 0 # prime factorization of 'number' lowerCAmelCase : List[Any] = prime_factorization(_snake_case ) lowerCAmelCase : Optional[int] = min(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0 , _snake_case ), "compare bust been from type bool" return number % 2 == 0 def _snake_case ( _snake_case : List[str] ): assert isinstance(_snake_case , _snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0 , _snake_case ), "compare bust been from type bool" return number % 2 != 0 def _snake_case ( _snake_case : Tuple ): assert ( isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case ) ), "'number' must been an int, even and > 2" lowerCAmelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case ) lowerCAmelCase : Optional[Any] = len(_snake_case ) # run variable for while-loops. lowerCAmelCase : List[str] = 0 lowerCAmelCase : Tuple = None # exit variable. for break up the loops lowerCAmelCase : str = True while i < len_pn and loop: lowerCAmelCase : str = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase : Dict = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and (len(_snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
lowerCAmelCase : Dict = 0 while numbera != 0: lowerCAmelCase : Union[str, Any] = numbera % numbera lowerCAmelCase : List[Any] = numbera lowerCAmelCase : List[Any] = rest # precondition assert isinstance(_snake_case , _snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase : List[str] = prime_factorization(_snake_case ) lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case ) elif numbera == 1 or numbera == 1: lowerCAmelCase : Union[str, Any] = [] lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = max(_snake_case , _snake_case ) lowerCAmelCase : Dict = 0 lowerCAmelCase : int = 0 lowerCAmelCase : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case ) lowerCAmelCase : Any = prime_fac_a.count(_snake_case ) for _ in range(max(_snake_case , _snake_case ) ): ans *= n else: lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case ) for _ in range(_snake_case ): ans *= n done.append(_snake_case ) # precondition assert isinstance(_snake_case , _snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _snake_case ( _snake_case : Any ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_snake_case ): ans += 1 # precondition assert isinstance(_snake_case , _snake_case ) and is_prime( _snake_case ), "'ans' must been a prime number and from type int" return ans def _snake_case ( _snake_case : Any , _snake_case : Dict ): assert ( is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number lowerCAmelCase : str = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_snake_case ): number += 1 while number < p_number_a: ans.append(_snake_case ) number += 1 # fetch the next prime number. while not is_prime(_snake_case ): number += 1 # precondition assert ( isinstance(_snake_case , _snake_case ) and ans[0] != p_number_a and ans[len(_snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _snake_case ( _snake_case : List[Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase : Optional[Any] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_snake_case ) # precondition assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase : int = get_divisors(_snake_case ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (divisors[0] == 1) and (divisors[len(_snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ): assert ( isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) ) # precondition assert ( isinstance(_snake_case , _snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _snake_case ( _snake_case : Optional[int] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase : Optional[Any] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _snake_case ( _snake_case : Union[str, Any] ): assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase : Dict = 0 lowerCAmelCase : Dict = 1 lowerCAmelCase : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase : int = ans ans += fiba lowerCAmelCase : Optional[Any] = tmp return ans
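# Small smoke test, added for illustration only. The names exercised here (is_prime,
# prime_factorization, gcd, get_divisors) are exactly the ones this module already
# calls internally, so they are assumed to be the public entry points.
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(96)
    assert prime_factorization(84) == [2, 2, 3, 7]  # 2 * 2 * 3 * 7 == 84
    assert gcd(54, 24) == 6
    assert get_divisors(28) == [1, 2, 4, 7, 14, 28]  # 28 is perfect: proper divisors sum to 28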
314
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Union[str, Any] = logging.get_logger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : str = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase : Optional[int] = 192 lowerCAmelCase : List[Any] = 768 lowerCAmelCase : Union[str, Any] = 12 lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : Union[str, Any] = [800, 1333] lowerCAmelCase : Tuple = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase : Dict = 330 lowerCAmelCase : List[str] = 14 lowerCAmelCase : List[str] = 6 lowerCAmelCase : Dict = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase : Optional[int] = 384 lowerCAmelCase : str = 1536 lowerCAmelCase : Tuple = 12 lowerCAmelCase : str = 6 elif "yolos_b" in yolos_name: lowerCAmelCase : Optional[int] = [800, 1344] lowerCAmelCase : int = 91 lowerCAmelCase : List[str] = '''huggingface/label-files''' lowerCAmelCase : Dict = '''coco-detection-id2label.json''' lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase : str = {int(_snake_case ): v for k, v in idalabel.items()} lowerCAmelCase : Any = idalabel lowerCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def _snake_case ( _snake_case : dict , _snake_case : YolosConfig , _snake_case : bool = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCAmelCase : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase : int = in_proj_weight[: config.hidden_size, :] lowerCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCAmelCase : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase : List[str] = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase : List[str] = in_proj_bias[-config.hidden_size :] def _snake_case ( _snake_case : str ): if "backbone" in name: lowerCAmelCase : Optional[int] = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase : Union[str, Any] = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase : Union[str, Any] = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase : Union[str, Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase : str = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase : List[str] = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn''' , 
'''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[Any] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : int = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase : Union[str, Any] = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase : List[str] = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase : int = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def _snake_case ( _snake_case : dict , _snake_case : YolosForObjectDetection ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Any = orig_state_dict.pop(_snake_case ) if "qkv" in key: lowerCAmelCase : int = key.split('''.''' ) lowerCAmelCase : List[Any] = int(key_split[2] ) lowerCAmelCase : Any = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase : Optional[int] = val[:dim, :] lowerCAmelCase : Optional[Any] = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : Union[str, Any] = val[:dim] lowerCAmelCase : int = val[dim : dim * 2] lowerCAmelCase : Dict = val[-dim:] else: lowerCAmelCase : Optional[int] = val return orig_state_dict def _snake_case ( ): lowerCAmelCase : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Optional[Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : bool = False ): lowerCAmelCase : Optional[Any] = get_yolos_config(_snake_case ) # load original state_dict lowerCAmelCase : Tuple = torch.load(_snake_case , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase : Dict = YolosForObjectDetection(_snake_case ) model.eval() lowerCAmelCase : List[Any] = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase : Optional[int] = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase : Tuple = YolosImageProcessor(format='''coco_detection''' , size=_snake_case ) lowerCAmelCase : int = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase : int = model(**_snake_case ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = outputs.logits, outputs.pred_boxes lowerCAmelCase, lowerCAmelCase : Union[str, Any] = None, None if yolos_name == "yolos_ti": lowerCAmelCase : Optional[int] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) lowerCAmelCase : Optional[int] = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase : str = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) lowerCAmelCase : List[Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase : Optional[Any] = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) lowerCAmelCase : 
Optional[Any] = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase : Tuple = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) lowerCAmelCase : List[str] = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": lowerCAmelCase : Any = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) lowerCAmelCase : Any = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _snake_case , atol=1E-4 ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: lowerCAmelCase : int = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase : int = model_mapping[yolos_name] image_processor.push_to_hub(_snake_case , organization='''hustvl''' ) model.push_to_hub(_snake_case , organization='''hustvl''' ) if __name__ == "__main__": snake_case__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Optional[int] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
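# Hedged follow-up sketch (not in the conversion script): reloading a converted
# checkpoint for inference. "hustvl/yolos-small" is the hub id the mapping above
# assigns to yolos_s_200_pre; the image URL is the COCO sample from prepare_img().
import requests
import torch
from PIL import Image

from transformers import YolosForObjectDetection, YolosImageProcessor

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))
print(outputs.logits.shape, outputs.pred_boxes.shape)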
314
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_( a__ ): __UpperCamelCase = '''vit_msn''' def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Tuple = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : Tuple = image_size lowerCAmelCase : List[str] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = qkv_bias
314
1
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( _snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): # Initialise PyTorch model lowerCAmelCase : Any = TaConfig.from_json_file(_snake_case ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCAmelCase : List[Any] = TaForConditionalGeneration(_snake_case ) # Load weights from tf checkpoint load_tf_weights_in_ta(_snake_case , _snake_case , _snake_case ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": snake_case__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) snake_case__ : List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
314
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case__ : Optional[Any] = logging.getLogger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case ) lowerCAmelCase : Optional[int] = { '''repo_id''': str(_snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(_snake_case , _snake_case , indent=4 ) def _snake_case ( _snake_case : Any ): if params.n_gpu <= 0: lowerCAmelCase : Dict = 0 lowerCAmelCase : Optional[int] = -1 lowerCAmelCase : Dict = True lowerCAmelCase : int = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] ) lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] ) lowerCAmelCase : int = int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node lowerCAmelCase : str = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = 1 lowerCAmelCase : Dict = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase : List[Any] = params.n_nodes > 1 # summary lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def _snake_case ( _snake_case : Optional[int] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
314
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable snake_case__ : Dict = { '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''], '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : str = [ '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXJapaneseForCausalLM''', '''GPTNeoXJapaneseLayer''', '''GPTNeoXJapaneseModel''', '''GPTNeoXJapanesePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys snake_case__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
314
"""simple docstring""" def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0''' raise ValueError(_snake_case ) else: lowerCAmelCase : str = sylvester(number - 1 ) lowerCAmelCase : Optional[Any] = num - 1 lowerCAmelCase : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
314
1
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print('''Googling.....''') snake_case__ : Optional[Any] = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:]) snake_case__ : Any = requests.get(url, headers={'''UserAgent''': UserAgent().random}) # res.raise_for_status() with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) snake_case__ : Union[str, Any] = BeautifulSoup(res.text, '''html.parser''') snake_case__ : List[Any] = list(soup.select('''.eZt8xd'''))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get('''href''')) else: webbrowser.open(f"""https://google.com{link.get("href")}""")
314
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCAmelCase : Union[str, Any] = 6 lowerCAmelCase : Any = 128 lowerCAmelCase : List[Any] = (2, 2, 18, 2) lowerCAmelCase : Any = (4, 8, 16, 32) elif "large" in model_name: lowerCAmelCase : Tuple = 12 lowerCAmelCase : Dict = 192 lowerCAmelCase : List[str] = (2, 2, 18, 2) lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCAmelCase : Optional[int] = window_size lowerCAmelCase : Any = embed_dim lowerCAmelCase : Optional[Any] = depths lowerCAmelCase : int = num_heads return config def _snake_case ( _snake_case : Union[str, Any] ): if "encoder.mask_token" in name: lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCAmelCase : Tuple = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCAmelCase : str = '''layernorm.bias''' if "decoder" in name: pass else: lowerCAmelCase : Optional[Any] = '''swin.''' + name return name def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ): for key in orig_state_dict.copy().keys(): lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase : List[Any] = key.split('''.''' ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : Optional[Any] = int(key_split[4] ) lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase : Dict = val[:dim, :] lowerCAmelCase : Dict = val[ dim : dim * 2, : ] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : str = val[ :dim ] lowerCAmelCase : List[str] = val[ dim : dim * 2 ] lowerCAmelCase : Optional[Any] = val[ -dim: ] else: lowerCAmelCase : str = val return orig_state_dict def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ): lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model'''] lowerCAmelCase : List[Any] = get_swin_config(_snake_case ) lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case ) model.eval() lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case ) model.load_state_dict(_snake_case ) lowerCAmelCase : str = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(f'''microsoft/{model_name}''' ) image_processor.push_to_hub(f'''microsoft/{model_name}''' ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) snake_case__ : Dict = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
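# Hedged sketch (not in the script): reloading the pushed checkpoint;
# "microsoft/swin-base-simmim-window6-192" is where the script above pushes
# swin-base-simmim-window6-192, assuming the push succeeded.
from transformers import SwinForMaskedImageModeling, ViTImageProcessor

processor = ViTImageProcessor(size={"height": 192, "width": 192})
model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")
print(model.config.image_size, model.config.window_size)  # -> 192 6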
314
1
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
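# Editor's sketch: the two module-level helpers above rescale boxes stored as
# (x0, y0, x1, y1) by a per-image (scale_y, scale_x) pair and clamp them to the
# image extent. The numbers below are made up for illustration.
import torch

boxes = torch.tensor([[10.0, 20.0, 120.0, 90.0]])
scale_yx = torch.tensor([[0.5, 0.25]])
boxes[:, 0::2] *= scale_yx[:, 1]  # x coordinates use the x scale
boxes[:, 1::2] *= scale_yx[:, 0]  # y coordinates use the y scale
height, width = 32, 32
boxes[:, 0].clamp_(min=0, max=width)
boxes[:, 1].clamp_(min=0, max=height)
boxes[:, 2].clamp_(min=0, max=width)
boxes[:, 3].clamp_(min=0, max=height)
print(boxes)  # [[2.5, 10.0, 30.0, 32.0]] after clamping y1 down from 45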
314
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
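# Editor's usage sketch for the pipeline above, assuming it is the class that
# diffusers exports as RePaintPipeline. The checkpoint id is one example of a
# compatible unconditional DDPM model, the image paths are placeholders, and
# the mask polarity should be checked against the RePaintScheduler docs.
import torch
import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
original = PIL.Image.open("face.png")  # placeholder path
mask = PIL.Image.open("mask.png")      # placeholder path; thresholded to {0, 1} by the pipeline
result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    jump_length=10,
    jump_n_sample=10,
    generator=torch.Generator().manual_seed(0),
).images[0]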
314
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case__ : List[Any] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = ['''pixel_values'''] def __init__( self : Optional[Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = size if size is not None else {'''shortest_edge''': 3_8_4} lowerCAmelCase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowerCAmelCase : List[Any] = do_resize lowerCAmelCase : Dict = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase : Optional[int] = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCAmelCase : Dict = resample lowerCAmelCase : Optional[int] = do_rescale lowerCAmelCase : Optional[int] = rescale_factor lowerCAmelCase : int = do_normalize lowerCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : float , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) lowerCAmelCase : List[Any] = size['''shortest_edge'''] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase : Any = int(shortest_edge / crop_pct ) lowerCAmelCase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowerCAmelCase : int = resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=UpperCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( UpperCamelCase_ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[int] , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase : Union[str, Any] = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase : str = resample if resample is not None else self.resample lowerCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean lowerCAmelCase : Tuple = image_std if image_std is not None else self.image_std lowerCAmelCase : int = size if size is not None else self.size lowerCAmelCase : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowerCAmelCase : Tuple = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowerCAmelCase : str = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: lowerCAmelCase : Dict = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: lowerCAmelCase : Dict = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: lowerCAmelCase : Dict = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] lowerCAmelCase : Tuple = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
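# Editor's note on the crop_pct branch above, using the class defaults: an
# evaluation size below 384 is first resized so the short side equals
# size / crop_pct, then center cropped back to size.
size = 224
crop_pct = 224 / 256
resize_shortest_edge = int(size / crop_pct)
print(resize_shortest_edge)  # 256 -> resize short side to 256, then take a 224x224 center crop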
314
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are 
decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
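# Editor's sketch of the non-test usage these cases exercise: generate() runs in
# a worker thread while the main thread iterates over decoded text. Same tiny
# test checkpoint as above; any causal LM would do.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok(["Hello"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
for chunk in streamer:
    print(chunk, end="")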
314
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() lowerCAmelCase : Any = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCAmelCase : List[Any] = { '''do_resize''': True, '''size''': 2_0, '''do_center_crop''': True, '''crop_size''': 1_8, '''do_normalize''': True, '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073], '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowerCAmelCase : Any = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str , **UpperCamelCase_ : Union[str, Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : int , **UpperCamelCase_ : Optional[Any] ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , **UpperCamelCase_ : Tuple ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self : str ): lowerCAmelCase : Tuple = self.get_tokenizer() lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer() lowerCAmelCase : Any = self.get_image_processor() lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) lowerCAmelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) lowerCAmelCase : Dict = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) lowerCAmelCase : Any = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase : Dict = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) lowerCAmelCase : Union[str, Any] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Tuple = self.get_image_processor() lowerCAmelCase : int = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : int = self.prepare_image_inputs() lowerCAmelCase : str = image_processor(UpperCamelCase_ , return_tensors='''np''' ) lowerCAmelCase : Optional[int] = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[int] = self.get_image_processor() lowerCAmelCase : int = self.get_tokenizer() lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = '''lower newer''' lowerCAmelCase : Any = processor(text=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer(UpperCamelCase_ , padding='''max_length''' , max_length=6_4 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = self.get_image_processor() lowerCAmelCase : List[str] = self.get_tokenizer() lowerCAmelCase : List[str] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = '''lower newer''' lowerCAmelCase : Optional[int] = self.prepare_image_inputs() lowerCAmelCase : int = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = self.get_image_processor() lowerCAmelCase : List[Any] = self.get_tokenizer() lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase : Dict = processor.batch_decode(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def 
lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = self.get_image_processor() lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = '''lower newer''' lowerCAmelCase : Any = self.prepare_image_inputs() lowerCAmelCase : Optional[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
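# Editor's sketch of the pattern these tests exercise, outside unittest: one
# processor call tokenizes text and preprocesses images together.
# "kakaobrain/align-base" is assumed here to be a hosted ALIGN checkpoint.
import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(list(inputs.keys()))  # input_ids, token_type_ids, attention_mask, pixel_values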
314
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
1
"""simple docstring""" import os import numpy import onnx def _snake_case ( _snake_case : Tuple , _snake_case : Optional[int] ): lowerCAmelCase : int = a.name lowerCAmelCase : Union[str, Any] = b.name lowerCAmelCase : List[Any] = '''''' lowerCAmelCase : int = '''''' lowerCAmelCase : Tuple = a == b lowerCAmelCase : Tuple = name_a lowerCAmelCase : Dict = name_b return res def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : int ): for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_snake_case , _snake_case ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) _graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] ): for n in graph_proto.node: _node_replace_input_with(_snake_case , _snake_case , _snake_case ) def _snake_case ( _snake_case : str , _snake_case : Tuple , _snake_case : Tuple ): lowerCAmelCase : List[Any] = list(model.graph.initializer ) lowerCAmelCase : str = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowerCAmelCase : List[Any] = inits[i].name lowerCAmelCase : List[Any] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case ) def _snake_case ( _snake_case : int ): lowerCAmelCase : Any = os.path.dirname(_snake_case ) lowerCAmelCase : Union[str, Any] = os.path.basename(_snake_case ) lowerCAmelCase : Optional[Any] = onnx.load(os.path.join(_snake_case , _snake_case ) ) lowerCAmelCase : Union[str, Any] = list(model.graph.initializer ) lowerCAmelCase : Dict = set() lowerCAmelCase : Optional[int] = {} lowerCAmelCase : str = [] lowerCAmelCase : int = 0 for i in range(len(_snake_case ) ): if i in dup_set: continue for j in range(i + 1 , len(_snake_case ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_snake_case ) dup_set.add(_snake_case ) lowerCAmelCase : Optional[int] = inits[j].data_type lowerCAmelCase : Tuple = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''' , _snake_case ) total_reduced_size += mem_size lowerCAmelCase : Tuple = inits[i].name lowerCAmelCase : int = inits[j].name if name_i in dup_map: dup_map[name_i].append(_snake_case ) else: lowerCAmelCase : str = [name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' ) lowerCAmelCase : Optional[Any] = sorted(_snake_case ) _remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case ) lowerCAmelCase : Optional[Any] = '''optimized_''' + model_file_name lowerCAmelCase : Dict = os.path.join(_snake_case , _snake_case ) onnx.save(_snake_case , _snake_case ) return new_model
314
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ): lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0] lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ ) lowerCAmelCase : Any = nsfw_detected.flatten() lowerCAmelCase : Dict = nsfw_detected > p_threshold lowerCAmelCase : int = nsfw_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential NSFW content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ): if nsfw_detected_: lowerCAmelCase : List[Any] = np.zeros(images[idx].shape ) lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = watermark_detected.flatten() lowerCAmelCase : Optional[int] = watermark_detected > w_threshold lowerCAmelCase : Union[str, Any] = watermark_detected.tolist() if any(UpperCamelCase_ ): logger.warning( '''Potential watermarked content was detected in one or more images. A black image will be returned instead.''' ''' Try again with a different prompt and/or seed.''' ) for idx, watermark_detected_ in enumerate(UpperCamelCase_ ): if watermark_detected_: lowerCAmelCase : List[str] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
314
1
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract snake_case__ : Optional[Any] = logging.get_logger(__name__) def _snake_case ( _snake_case : int , _snake_case : List[Any] , _snake_case : List[Any] ): return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def _snake_case ( _snake_case : np.ndarray , _snake_case : Optional[str] , _snake_case : Optional[str] ): lowerCAmelCase : str = to_pil_image(_snake_case ) lowerCAmelCase, lowerCAmelCase : Any = pil_image.size lowerCAmelCase : Optional[Any] = pytesseract.image_to_data(_snake_case , lang=_snake_case , output_type='''dict''' , config=_snake_case ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[Any] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates lowerCAmelCase : int = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] lowerCAmelCase : int = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] lowerCAmelCase : Any = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] lowerCAmelCase : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] lowerCAmelCase : str = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] lowerCAmelCase : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format lowerCAmelCase : List[str] = [] for x, y, w, h in zip(_snake_case , _snake_case , _snake_case , _snake_case ): lowerCAmelCase : Tuple = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes lowerCAmelCase : List[Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case , _snake_case , _snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class snake_case_( a__ ): __UpperCamelCase = ['''pixel_values'''] def __init__( self : Union[str, Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : float = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[float, Iterable[float]] = None , UpperCamelCase_ : Union[float, Iterable[float]] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = "" , **UpperCamelCase_ : int , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} lowerCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase_ ) 
lowerCAmelCase : Optional[Any] = do_resize lowerCAmelCase : str = size lowerCAmelCase : List[str] = resample lowerCAmelCase : Optional[int] = do_rescale lowerCAmelCase : int = rescale_value lowerCAmelCase : Optional[Any] = do_normalize lowerCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD lowerCAmelCase : Any = apply_ocr lowerCAmelCase : Tuple = ocr_lang lowerCAmelCase : Union[str, Any] = tesseract_config def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ): lowerCAmelCase : List[str] = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) lowerCAmelCase : int = (size['''height'''], size['''width''']) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, Iterable[float]] , UpperCamelCase_ : Union[float, Iterable[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Union[float, Iterable[float]] = None , UpperCamelCase_ : Union[float, Iterable[float]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Union[str, Any] , ): lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase : str = size if size is not None else self.size lowerCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase_ ) lowerCAmelCase : Any = resample if resample is not None else self.resample lowerCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean lowerCAmelCase : int = image_std if image_std is not None else self.image_std lowerCAmelCase : int = apply_ocr if apply_ocr is not None else self.apply_ocr lowerCAmelCase : Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang lowerCAmelCase : Optional[Any] = tesseract_config if tesseract_config is 
not None else self.tesseract_config lowerCAmelCase : List[Any] = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. lowerCAmelCase : Optional[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) lowerCAmelCase : Optional[int] = [] lowerCAmelCase : str = [] for image in images: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = apply_tesseract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) words_batch.append(UpperCamelCase_ ) boxes_batch.append(UpperCamelCase_ ) if do_resize: lowerCAmelCase : int = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: lowerCAmelCase : Dict = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: lowerCAmelCase : int = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] lowerCAmelCase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] lowerCAmelCase : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCamelCase_ ) if apply_ocr: lowerCAmelCase : Tuple = words_batch lowerCAmelCase : Dict = boxes_batch return data
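# Editor's sketch of the 0-1000 box normalization applied to the Tesseract output
# above, on made-up numbers:
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([10, 20, 110, 220], width=200, height=400))  # [50, 50, 550, 550]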
314
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer snake_case__ : str = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } snake_case__ : Union[str, Any] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } snake_case__ : Optional[Any] = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': 
{'''do_lower_case''': False}, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase : Tuple = do_lower_case lowerCAmelCase : Union[str, Any] = strip_accents lowerCAmelCase : Tuple = tokenize_chinese_chars lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
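# Editor's sketch of what the token-type method above computes on a sentence
# pair: zeros over "[CLS] A [SEP]", ones over "B [SEP]".
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("lower", "newer")
print(enc["token_type_ids"])  # 0s for [CLS] lower [SEP], then 1s for newer [SEP]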
314
1
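# Hedged usage sketch for the fast BERT tokenizer above. It assumes the
# `transformers` package plus network access to the public
# bert-base-uncased checkpoint; the checkpoint name and sentences are
# illustrative only.
from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained('''bert-base-uncased''')
encoded = tokenizer('''Hello world''', '''How are you?''')
# Pairs are laid out as [CLS] A [SEP] B [SEP]; token_type_ids are 0 for the
# first segment and 1 for the second, matching the two methods defined above.
print(encoded['''input_ids'''])
print(encoded['''token_type_ids'''])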
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Any = {'''vocab_file''': '''spiece.model'''} snake_case__ : Tuple = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } snake_case__ : Tuple = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : Optional[int]="</s>" , UpperCamelCase_ : Tuple="<pad>" , UpperCamelCase_ : Tuple="[SEP]" , UpperCamelCase_ : int="[MASK]" , UpperCamelCase_ : List[str]="[CLS]" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : Optional[Any] , ): lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowerCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowerCAmelCase : Optional[Any] = vocab_file lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase_ ) @property def lowerCamelCase__ ( self : Optional[int] ): return self.sp_model.get_piece_size() def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): lowerCAmelCase : Tuple = self.__dict__.copy() lowerCAmelCase : Union[str, Any] = None return state def __setstate__( self : str , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase : List[Any] = {} lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): return self.sp_model.piece_to_id(UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : List[Any] = self.sp_model.IdToPiece(UpperCamelCase_ ) return token def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : List[Any] = [] lowerCAmelCase : str = '''''' lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase_ ) + token lowerCAmelCase : Optional[Any] = True lowerCAmelCase : int = [] else: current_sub_tokens.append(UpperCamelCase_ ) lowerCAmelCase : int = False out_string += self.sp_model.decode(UpperCamelCase_ ) return out_string.strip() def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : List[str] , ): lowerCAmelCase : Optional[Any] = kwargs.pop('''use_source_tokenizer''' , UpperCamelCase_ ) lowerCAmelCase : List[str] = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowerCAmelCase : str = [] lowerCAmelCase : Union[str, Any] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) ) lowerCAmelCase : Dict = [] sub_texts.append(UpperCamelCase_ ) else: current_sub_text.append(UpperCamelCase_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: lowerCAmelCase : Tuple = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(UpperCamelCase_ ) ) else: lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCAmelCase : Tuple = self.clean_up_tokenization(UpperCamelCase_ ) return clean_text else: return text def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : str = [self.cls_token_id] lowerCAmelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : int = [self.sep_token_id] lowerCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
314
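# Minimal sketch of the special-token layout the BigBird tokenizer above
# implements in build_inputs_with_special_tokens. The ids (cls=65, sep=66)
# are made-up stand-ins; real values come from the SentencePiece vocabulary.
def build_inputs_sketch(ids_a, ids_b=None, cls_id=65, sep_id=66):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

assert build_inputs_sketch([1, 2]) == [65, 1, 2, 66]
assert build_inputs_sketch([1], [2]) == [65, 1, 66, 2, 66]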
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
314
1
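# Hedged sketch of the loop the scheduler tests above exercise:
# DDPMScheduler.step() walks a sample from x_t to x_{t-1} given a noise
# prediction. The random tensor stands in for a real UNet and is purely an
# assumption.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_0_0_0)
scheduler.set_timesteps(5_0)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample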
"""simple docstring""" # Function to print upper half of diamond (pyramid) def _snake_case ( _snake_case : List[Any] ): for i in range(0 , _snake_case ): for _ in range(0 , n - i - 1 ): # printing spaces print(''' ''' , end='''''' ) for _ in range(0 , i + 1 ): # printing stars print('''* ''' , end='''''' ) print() def _snake_case ( _snake_case : Dict ): for i in range(_snake_case , 0 , -1 ): for _ in range(_snake_case , 0 , -1 ): # printing stars print('''* ''' , end='''''' ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(''' ''' , end='''''' ) def _snake_case ( _snake_case : Tuple ): if n <= 0: print(''' ... .... nothing printing :(''' ) return floyd(_snake_case ) # upper half reverse_floyd(_snake_case ) # lower half if __name__ == "__main__": print(R'''| /\ | |- | |- |--| |\ /| |-''') print(R'''|/ \| |- |_ |_ |__| | \/ | |_''') snake_case__ : Any = 1 while K: snake_case__ : Any = int(input('''enter the number and , and see the magic : ''')) print() pretty_print(user_number) snake_case__ : str = int(input('''press 0 to exit... and 1 to continue...''')) print('''Good Bye...''')
314
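# The upper half (floyd) above collapses to one string expression; this
# sketch prints the same rows of spaces and '* ' cells.
def floyd_compact(n):
    return '''\n'''.join(''' ''' * (n - i - 1) + '''* ''' * (i + 1) for i in range(n))

print(floyd_compact(3))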
"""simple docstring""" def _snake_case ( _snake_case : int = 50000000 ): lowerCAmelCase : List[str] = set() lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) ) lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) ) for primea in primes: lowerCAmelCase : Optional[Any] = primea * primea for primea in primes: lowerCAmelCase : List[Any] = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase : Tuple = primea * primea * primea * primea lowerCAmelCase : Tuple = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f"""{solution() = }""")
314
1
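# Sanity-check sketch for the sieve above: brute force over a few small
# primes reproduces the Project Euler 87 example that exactly four numbers
# below fifty are a prime square plus a prime cube plus a prime fourth power
# (28, 33, 47 and 49).
def brute_force(limit):
    primes = [2, 3, 5, 7]  # enough for tiny limits only
    found = {
        p ** 2 + q ** 3 + r ** 4
        for p in primes
        for q in primes
        for r in primes
        if p ** 2 + q ** 3 + r ** 4 < limit
    }
    return len(found)

assert brute_force(5_0) == 4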
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = RoCBertTokenizer __UpperCamelCase = None __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = filter_non_english def lowerCamelCase__ ( self : Dict ): super().setUp() lowerCAmelCase : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d'''] lowerCAmelCase : List[str] = {} lowerCAmelCase : List[Any] = {} for i, value in enumerate(UpperCamelCase_ ): lowerCAmelCase : Dict = i lowerCAmelCase : List[str] = i lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] ) lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer: json.dump(UpperCamelCase_ , UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer: json.dump(UpperCamelCase_ , UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase : List[Any] = tokenizer.tokenize('''你好[SEP]你是谁''' ) self.assertListEqual(UpperCamelCase_ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] lowerCAmelCase : Optional[int] = {} for i, token in enumerate(UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = i lowerCAmelCase : Dict = RoCBertWordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def lowerCamelCase__ ( self : Tuple ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def lowerCamelCase__ ( self : Dict ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def lowerCamelCase__ ( self : List[str] ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[str] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) if self.test_rust_tokenizer: lowerCAmelCase : Any = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) def lowerCamelCase__ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : int = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' lowerCAmelCase : int = tokenizer_r.encode_plus( UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , ) lowerCAmelCase : str = tokenizer_r.do_lower_case if hasattr(UpperCamelCase_ , '''do_lower_case''' ) else False lowerCAmelCase : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), 
'''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Union[str, Any] = ['''的''', '''人''', '''有'''] lowerCAmelCase : Tuple = ''''''.join(UpperCamelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase : List[str] = True lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ ) lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = False lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : str = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : str = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Dict = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". 
lowerCAmelCase : List[str] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCamelCase_ ) ] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase : Union[str, Any] = tokenizer.encode('''你好''' , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowerCAmelCase : List[Any] = '''你好,你是谁''' lowerCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase_ ) lowerCAmelCase : List[str] = tokenizer.prepare_for_model( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
314
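# Hedged sketch of the fixture pattern in setUp() above: RoCBERT needs three
# aligned files (vocab, glyph-shape ids, pronunciation ids). The tokens and
# the shared id mapping here are illustrative only.
import json
import os
import tempfile

from transformers import RoCBertTokenizer

tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''']
tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, '''vocab.txt''')
shape_file = os.path.join(tmpdir, '''word_shape.json''')
pron_file = os.path.join(tmpdir, '''word_pronunciation.json''')
with open(vocab_file, '''w''', encoding='''utf-8''') as f:
    f.write('''\n'''.join(tokens) + '''\n''')
mapping = {token: i for i, token in enumerate(tokens)}
for path in (shape_file, pron_file):
    with open(path, '''w''', encoding='''utf-8''') as f:
        json.dump(mapping, f, ensure_ascii=False)
tokenizer = RoCBertTokenizer(vocab_file, shape_file, pron_file)
print(tokenizer.tokenize('''你好'''))  # ['你', '好']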
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor'''] snake_case__ : List[Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
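# Generic sketch of the lazy-import pattern behind `_LazyModule` above:
# attribute access triggers the real import. This stand-in avoids the
# private transformers helper; the class and its behaviour are assumptions,
# not that helper's API.
import importlib


class LazyModuleSketch:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(F'''{self._package}.{self._attr_to_module[attr]}''')
        return getattr(module, attr)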
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor snake_case__ : Tuple = logging.get_logger(__name__) class snake_case_( a__ ): def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ): warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
314
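# Generic sketch of the deprecation-shim pattern above: the old class name
# keeps working but warns and inherits everything from its replacement. The
# class names here are placeholders, not a transformers API.
import warnings


class NewImageProcessor:
    pass


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''OldFeatureExtractor is deprecated; use NewImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)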
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
314
1
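# Standalone sketch of the shortest-edge rule in ResizeShortestEdge above:
# scale so that min(h, w) == size, rescale if max(h, w) would exceed
# max_size, then round with + 0.5 as the class does. The sample sizes are
# illustrative.
def resize_shortest_edge(h, w, size, max_size):
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert resize_shortest_edge(4_8_0, 6_4_0, 8_0_0, 1_3_3_3) == (8_0_0, 1_0_6_7)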
"""simple docstring""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _snake_case ( _snake_case : Optional[Any] ): if isinstance(_snake_case , collections.abc.Iterable ): return x return (x, x) @require_tf class snake_case_: def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ): pass def lowerCamelCase__ ( self : Union[str, Any] ): pass def lowerCamelCase__ ( self : List[Any] ): pass def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(UpperCamelCase_ ) lowerCAmelCase : str = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : List[str] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase_ , text_model=UpperCamelCase_ ) lowerCAmelCase : List[Any] = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int=None , **UpperCamelCase_ : Dict ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = {'''vision_model''': vision_model, '''text_model''': text_model} lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_ ) lowerCAmelCase : str = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.assertEqual(output['''text_embeds'''].shape , 
(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Dict = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase_ , text_model=UpperCamelCase_ ) lowerCAmelCase : str = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : List[str] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = TFVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : str = after_output[0].numpy() lowerCAmelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=None , **UpperCamelCase_ : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase_ , text_model=UpperCamelCase_ ) lowerCAmelCase : Tuple = model( input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase : Union[str, Any] = to_atuple(vision_model.config.image_size ) lowerCAmelCase : Union[str, Any] = to_atuple(vision_model.config.patch_size ) lowerCAmelCase : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase : List[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase : Dict = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float ): lowerCAmelCase : str = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase_ , UpperCamelCase_ , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : List[str] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase_ ) def lowerCamelCase__ ( self : 
Optional[int] ): lowerCAmelCase : str = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Any = self.get_pretrained_model_and_inputs() lowerCAmelCase : str = model_a(**UpperCamelCase_ ) lowerCAmelCase : Dict = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Any = TFVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = model_a(**UpperCamelCase_ ) lowerCAmelCase : Tuple = after_outputs[0].numpy() lowerCAmelCase : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) @require_tf class snake_case_( a__ , unittest.TestCase ): def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' ) lowerCAmelCase : Optional[int] = 1_3 lowerCAmelCase : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase : Tuple = random_attention_mask([batch_size, 4] ) lowerCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = TFViTModel(UpperCamelCase_ , name='''vision_model''' ) lowerCAmelCase : Optional[int] = TFBertModel(UpperCamelCase_ , name='''text_model''' ) return vision_model, text_model def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[int] = TFViTModelTester(self ) lowerCAmelCase : Tuple = TFBertModelTester(self ) lowerCAmelCase : List[Any] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase : List[Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[Any] = vision_config_and_inputs ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class snake_case_( a__ , unittest.TestCase ): def lowerCamelCase__ ( self : Dict ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' ) lowerCAmelCase : str = 1_3 lowerCAmelCase : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase : List[str] = random_attention_mask([batch_size, 4] ) lowerCAmelCase : Any = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : int ): lowerCAmelCase, lowerCAmelCase : List[str] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase_ , text_model=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model( input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCAmelCase : Dict = to_atuple(vision_model.config.image_size ) lowerCAmelCase : str = to_atuple(vision_model.config.patch_size ) lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase : Union[str, Any] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[Any] = TFDeiTModel(UpperCamelCase_ , name='''vision_model''' ) lowerCAmelCase : int = TFRobertaModel(UpperCamelCase_ , name='''text_model''' ) return vision_model, text_model def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = TFDeiTModelTester(self ) lowerCAmelCase : Optional[int] = TFRobertaModelTester(self ) lowerCAmelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = vision_config_and_inputs ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class snake_case_( a__ , unittest.TestCase ): def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' ) lowerCAmelCase : Tuple = 1_3 lowerCAmelCase : Dict = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase : Optional[int] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowerCamelCase__ ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = TFCLIPVisionModel(UpperCamelCase_ , name='''vision_model''' ) lowerCAmelCase : List[Any] = TFBertModel(UpperCamelCase_ , name='''text_model''' ) return vision_model, text_model def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = TFCLIPVisionModelTester(self ) lowerCAmelCase : str = TFBertModelTester(self ) lowerCAmelCase : List[str] = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase : int = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase : Union[str, Any] = vision_config_and_inputs ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class snake_case_( unittest.TestCase ): @slow def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=UpperCamelCase_ ) lowerCAmelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase : int = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''np''' ) lowerCAmelCase : Tuple = model(**UpperCamelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase : Optional[int] = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , UpperCamelCase_ , atol=1E-3 ) )
314
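# Hedged sketch of the core API the tests above cover: any vision and text
# backbone can be paired into one dual encoder. The tiny checkpoints are the
# same ones the tests download and serve only as illustration.
from transformers import TFVisionTextDualEncoderModel

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-random-bert'''
)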
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
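# Usage sketch (editor's illustration; the local LTP path and the "bert-base-chinese"
# checkpoint are assumptions, since the script above only pins its own CLI defaults):
#
#   from ltp import LTP
#   from transformers import BertTokenizer
#
#   ltp_tokenizer = LTP("./resources/ltp")
#   bert_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
#   refs = prepare_ref(["我喜欢自然语言处理"], ltp_tokenizer, bert_tokenizer)
#   # refs[0] lists positions of "##"-continuation subwords inside whole Chinese words,
#   # the format consumed as a whole-word-masking reference file during MLM training.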
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : str=3 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : Any=3 , UpperCamelCase_ : List[str]=1_0 , UpperCamelCase_ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase_ : Any=[1, 1, 2, 1] , UpperCamelCase_ : int=True , UpperCamelCase_ : str=True , UpperCamelCase_ : str="relu" , UpperCamelCase_ : int=3 , UpperCamelCase_ : Optional[Any]=None , ): lowerCAmelCase : List[str] = parent lowerCAmelCase : Optional[int] = batch_size lowerCAmelCase : List[Any] = image_size lowerCAmelCase : List[str] = num_channels lowerCAmelCase : Optional[int] = embeddings_size lowerCAmelCase : Any = hidden_sizes lowerCAmelCase : Optional[int] = depths lowerCAmelCase : int = is_training lowerCAmelCase : List[str] = use_labels lowerCAmelCase : int = hidden_act lowerCAmelCase : int = num_labels lowerCAmelCase : str = scope lowerCAmelCase : Optional[int] = len(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values def lowerCamelCase__ ( self : Tuple ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Dict = FlaxRegNetModel(config=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Tuple = self.num_labels lowerCAmelCase : Dict = FlaxRegNetForImageClassification(config=UpperCamelCase_ ) lowerCAmelCase : List[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[str] = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase : str = config_and_inputs lowerCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = FlaxRegNetModelTester(self ) lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , 
has_text_modality=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self : List[str] ): return def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def lowerCamelCase__ ( self : Any ): pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def lowerCamelCase__ ( self : Any ): pass def lowerCamelCase__ ( self : int ): lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : List[Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()] lowerCAmelCase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): def check_hidden_states_output(UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : List[Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Tuple = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) @jax.jit def model_jitted(UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : Union[str, Any] = model_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Dict = model_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , 
len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : Tuple ): return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowerCAmelCase : Tuple = self.default_image_processor lowerCAmelCase : Dict = prepare_img() lowerCAmelCase : List[Any] = image_processor(images=UpperCamelCase_ , return_tensors='''np''' ) lowerCAmelCase : Optional[Any] = model(**UpperCamelCase_ ) # verify the logits lowerCAmelCase : int = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
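# Inference sketch mirroring the integration test above (editor's illustration;
# assumes Hub access for the "facebook/regnet-y-040" checkpoint used in the test):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, FlaxRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="np")
#   predicted_class = model(**inputs).logits.argmax(-1)  # ImageNet class id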
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Dict = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : int = 0 lowerCAmelCase : Dict = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Union[str, Any] = 0 # compute the shape of the output matrix lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : int = 0 lowerCAmelCase : Tuple = 0 return updated_arr def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ): lowerCAmelCase : Union[str, Any] = np.array(_snake_case ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 # compute the shape of the output matrix lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCAmelCase : str = 0 lowerCAmelCase : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image snake_case__ : Optional[Any] = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
"""simple docstring""" import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params snake_case__ : Union[str, Any] = getLogger(__name__) snake_case__ : str = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _snake_case ( _snake_case : List[str] , _snake_case : str , _snake_case : str , _snake_case : int = 8 , _snake_case : str = DEFAULT_DEVICE , _snake_case : List[Any]=False , _snake_case : int="summarization" , _snake_case : List[Any]=None , **_snake_case : Any , ): lowerCAmelCase : Tuple = Path(_snake_case ).open('''w''' , encoding='''utf-8''' ) lowerCAmelCase : Any = str(_snake_case ) lowerCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case ) if fpaa: lowerCAmelCase : Optional[int] = model.half() lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(_snake_case ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. lowerCAmelCase : Optional[Any] = time.time() # update config with task specific params use_task_specific_params(_snake_case , _snake_case ) if prefix is None: lowerCAmelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ): lowerCAmelCase : Union[str, Any] = [prefix + text for text in examples_chunk] lowerCAmelCase : Optional[Any] = tokenizer(_snake_case , return_tensors='''pt''' , truncation=_snake_case , padding='''longest''' ).to(_snake_case ) lowerCAmelCase : List[str] = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , ) lowerCAmelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() lowerCAmelCase : Any = int(time.time() - start_time ) # seconds lowerCAmelCase : List[Any] = len(_snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _snake_case ( ): return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def _snake_case ( _snake_case : Tuple=True ): lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=_snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=_snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=_snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=_snake_case , required=_snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=_snake_case , required=_snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=_snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) 
parser.add_argument('''--bs''' , type=_snake_case , default=8 , required=_snake_case , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=_snake_case , default=-1 , required=_snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=_snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate lowerCAmelCase, lowerCAmelCase : Union[str, Any] = parser.parse_known_args() lowerCAmelCase : List[str] = parse_numeric_n_bool_cl_kwargs(_snake_case ) if parsed_args and verbose: print(f'''parsed the following generate kwargs: {parsed_args}''' ) lowerCAmelCase : Optional[Any] = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: lowerCAmelCase : int = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) lowerCAmelCase : Optional[Any] = generate_summaries_or_translations( _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , ) if args.reference_path is None: return {} # Compute scores lowerCAmelCase : List[Any] = calculate_bleu if '''translation''' in args.task else calculate_rouge lowerCAmelCase : str = [x.rstrip() for x in open(args.save_path ).readlines()] lowerCAmelCase : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )] lowerCAmelCase : dict = score_fn(_snake_case , _snake_case ) scores.update(_snake_case ) if args.dump_args: scores.update(_snake_case ) if args.info: lowerCAmelCase : Dict = args.info if verbose: print(_snake_case ) if args.score_path is not None: json.dump(_snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
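# Direct-call sketch (editor's illustration; the model name and output path are
# assumptions, and any extra kwargs are forwarded to model.generate):
#
#   metrics = generate_summaries_or_translations(
#       ["PG&E scheduled the blackouts in response to forecasts for high winds."],
#       "preds.txt",
#       "sshleifer/distilbart-cnn-12-6",
#       batch_size=1,
#       device="cpu",
#       num_beams=4,
#   )
#   # -> {"n_obs": 1, "runtime": ..., "seconds_per_sample": ...}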
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
"""simple docstring""" import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : int = 3_2 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , UpperCamelCase_ : bool = True , UpperCamelCase_ : List[str]=7 , UpperCamelCase_ : Optional[int]=3_0 , UpperCamelCase_ : str=4_0_0 , UpperCamelCase_ : Tuple=3 , ): lowerCAmelCase : int = parent lowerCAmelCase : List[str] = do_resize lowerCAmelCase : int = size if size is not None else {'''shortest_edge''': 2_8_8} lowerCAmelCase : List[str] = size_divisor lowerCAmelCase : List[str] = do_rescale lowerCAmelCase : Tuple = rescale_factor lowerCAmelCase : Tuple = do_normalize lowerCAmelCase : List[str] = do_center_crop lowerCAmelCase : str = image_mean lowerCAmelCase : Union[str, Any] = image_std lowerCAmelCase : Dict = do_pad lowerCAmelCase : Optional[int] = batch_size lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : List[Any] = min_resolution lowerCAmelCase : List[Any] = max_resolution def lowerCamelCase__ ( self : Union[str, Any] ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=False ): if not batched: lowerCAmelCase : Tuple = self.size['''shortest_edge'''] lowerCAmelCase : int = image_inputs[0] if isinstance(UpperCamelCase_ , Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image.size else: lowerCAmelCase, lowerCAmelCase : List[Any] = image.shape[1], image.shape[2] lowerCAmelCase : Any = size / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : str = scale * h, size lowerCAmelCase : Tuple = int((1_3_3_3 / 8_0_0) * size ) if max(UpperCamelCase_ , UpperCamelCase_ ) > max_size: lowerCAmelCase : Any = max_size / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = newh * scale lowerCAmelCase : Optional[int] = neww * scale lowerCAmelCase, lowerCAmelCase : List[Any] = int(newh + 0.5 ), int(neww + 0.5 ) lowerCAmelCase, lowerCAmelCase : Tuple = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCAmelCase : List[str] = [] for image in image_inputs: lowerCAmelCase, lowerCAmelCase : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase : Any = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0] lowerCAmelCase : Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] 
)[1] return expected_height, expected_width @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = BridgeTowerImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Any = BridgeTowerImageProcessingTester(self ) @property def lowerCamelCase__ ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size_divisor''' ) ) def lowerCamelCase__ ( self : Any ): pass def lowerCamelCase__ ( self : Union[str, Any] ): # Initialize image processor lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase : Dict = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : List[str] ): # Initialize image processor lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : List[str] ): # Initialize image processor lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : 
Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input lowerCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values lowerCAmelCase, lowerCAmelCase : str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
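# Preprocessing sketch (editor's illustration; the "BridgeTower/bridgetower-base"
# checkpoint name is an assumption):
#
#   from PIL import Image
#   from transformers import BridgeTowerImageProcessor
#
#   processor = BridgeTowerImageProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=Image.open("cat.png"), return_tensors="pt")
#   encoding["pixel_values"].shape  # (1, 3, H, W); H and W are multiples of size_divisor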
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)