code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" def lowerCamelCase_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowerCamelCase_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> bool: # Base Case if curr_ind == len(__SCREAMING_SNAKE_CASE ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__SCREAMING_SNAKE_CASE ) ): if valid_connection(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # Insert current vertex into path as next transition _SCREAMING_SNAKE_CASE : Dict = next_ver # Validate created path if util_hamilton_cycle(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , curr_ind + 1 ): return True # Backtrack _SCREAMING_SNAKE_CASE : int = -1 return False def lowerCamelCase_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 )-> list[int]: _SCREAMING_SNAKE_CASE : Tuple = [-1] * (len(__SCREAMING_SNAKE_CASE ) + 1) # initialize start and end of path with starting index _SCREAMING_SNAKE_CASE : int = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 ) else []
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
"""Launch a transformers example on a remote runhouse cluster.

Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup
for cloud access setup instructions, if using on-demand hardware.
If the user passes --user <user> --host <host> --key_path <key_path> <example> <args>,
they are filled in as a BYO cluster; if the user passes --instance <instance>
--provider <provider> <example> <args>, they are filled in as an on-demand cluster.
Passing both BYO and on-demand cluster args is an error; otherwise defaults apply.
"""
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    # BUG FIX: `type=bool` treats ANY non-empty string (including "False") as
    # True; a flag with store_true gives the intended False-by-default boolean.
    parser.add_argument("--use_spot", action="store_true", default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    # Unknown args are forwarded verbatim to the example script.
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up the remote environment: installs transformers from local source.
    # Note transformers is copied into the home directory on the remote
    # machine, so we can install from there.
    cluster.install_packages(["pip:./"])
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run the example; shlex.quote keeps forwarded args shell-safe.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function
    # (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : """simple docstring""" def __init__( self : str , _A : Tuple , _A : Optional[int]=1_3 , _A : List[Any]=[3_0, 3_0] , _A : List[str]=2 , _A : Dict=3 , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=3_2 , _A : Tuple=5 , _A : Union[str, Any]=4 , _A : Union[str, Any]=3_7 , _A : Optional[Any]="gelu" , _A : Tuple=0.1 , _A : int=0.1 , _A : List[str]=1_0 , _A : Dict=0.02 , _A : List[Any]=3 , _A : List[Any]=None , _A : Dict=8 , _A : Optional[Any]=1_0 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = parent _SCREAMING_SNAKE_CASE : int = batch_size _SCREAMING_SNAKE_CASE : Optional[int] = image_size _SCREAMING_SNAKE_CASE : Optional[int] = patch_size _SCREAMING_SNAKE_CASE : str = num_channels _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : List[Any] = use_labels _SCREAMING_SNAKE_CASE : Tuple = hidden_size _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads _SCREAMING_SNAKE_CASE : List[Any] = intermediate_size _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act _SCREAMING_SNAKE_CASE : int = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = 
type_sequence_label_size _SCREAMING_SNAKE_CASE : Any = initializer_range _SCREAMING_SNAKE_CASE : Optional[int] = num_labels _SCREAMING_SNAKE_CASE : Dict = scope _SCREAMING_SNAKE_CASE : Tuple = n_targets _SCREAMING_SNAKE_CASE : List[str] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens _SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) _SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 + self.num_detection_tokens def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) _SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) _SCREAMING_SNAKE_CASE : Optional[Any] = [] for i in range(self.batch_size): _SCREAMING_SNAKE_CASE : Union[str, Any] = {} _SCREAMING_SNAKE_CASE : List[Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A) labels.append(_A) _SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : List[str]): """simple docstring""" return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , 
num_labels=self.num_labels , ) def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : Union[str, Any] , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = YolosModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[str] = model(_A) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def _lowerCAmelCase ( self : List[Any] , _A : int , _A : Any , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = YolosForObjectDetection(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : int = model(pixel_values=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) _SCREAMING_SNAKE_CASE : str = model(pixel_values=_A , labels=_A) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs _SCREAMING_SNAKE_CASE : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = (YolosModel, YolosForObjectDetection) if is_torch_available() else () a = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) a = False a = False a = False a = False def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : List[Any] , _A : Dict=False): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict 
= super()._prepare_for_class(_A , _A , return_labels=_A) if return_labels: if model_class.__name__ == "YolosForObjectDetection": _SCREAMING_SNAKE_CASE : str = [] for i in range(self.model_tester.batch_size): _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones( size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long) _SCREAMING_SNAKE_CASE : Tuple = torch.ones( self.model_tester.n_targets , 4 , device=_A , dtype=torch.float) labels.append(_A) _SCREAMING_SNAKE_CASE : int = labels return inputs_dict def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = YolosModelTester(self) _SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : Tuple): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[int] = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear)) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[str] = model_class(_A) _SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : str): """simple 
docstring""" _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : str = True # in YOLOS, the seq_len is different _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.expected_seq_len for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : int = outputs.attentions self.assertEqual(len(_A) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : List[Any] = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : Tuple = outputs.attentions self.assertEqual(len(_A) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) _SCREAMING_SNAKE_CASE : Tuple = len(_A) # Check attention is always last and order is fine _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : int = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_A)) _SCREAMING_SNAKE_CASE : int = outputs.attentions self.assertEqual(len(_A) , self.model_tester.num_hidden_layers) 
self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" def check_hidden_states_output(_A : Optional[Any] , _A : Optional[int] , _A : str): _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states _SCREAMING_SNAKE_CASE : int = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_A) , _A) # YOLOS has a different seq_length _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(_A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(_A , _A , _A) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_A) @slow def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : List[str] = YolosModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> List[str]: _SCREAMING_SNAKE_CASE : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( 
self : Optional[Any]): """simple docstring""" return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""") if is_vision_available() else None @slow def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor _SCREAMING_SNAKE_CASE : Any = prepare_img() _SCREAMING_SNAKE_CASE : Any = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Union[str, Any] = model(inputs.pixel_values) # verify outputs _SCREAMING_SNAKE_CASE : str = torch.Size((1, 1_0_0, 9_2)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=_A) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4)) # verify postprocessing _SCREAMING_SNAKE_CASE : Optional[int] = image_processor.post_process_object_detection( _A , threshold=0.3 , target_sizes=[image.size[::-1]])[0] _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861]).to(_A) _SCREAMING_SNAKE_CASE : Optional[int] = [7_5, 7_5, 1_7, 6_3, 1_7] _SCREAMING_SNAKE_CASE : str = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495]).to(_A) self.assertEqual(len(results["""scores"""]) , 5) self.assertTrue(torch.allclose(results["""scores"""] , _A , atol=1e-4)) self.assertSequenceEqual(results["""labels"""].tolist() , _A) self.assertTrue(torch.allclose(results["""boxes"""][0, :] , _A))
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with 
open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _snake_case ( ctypes.Structure ): """simple docstring""" a = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCamelCase_()-> Any: if os.name == "nt": _SCREAMING_SNAKE_CASE : List[str] = CursorInfo() _SCREAMING_SNAKE_CASE : Tuple = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase_()-> Tuple: if os.name == "nt": _SCREAMING_SNAKE_CASE : Union[str, Any] = CursorInfo() _SCREAMING_SNAKE_CASE : Any = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase_()-> List[Any]: try: hide_cursor() yield finally: show_cursor()
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... _SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', 
default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase_()-> Iterator[int]: _SCREAMING_SNAKE_CASE : List[str] = 2 while True: if is_prime(__SCREAMING_SNAKE_CASE ): yield num num += 1 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 2_000_000 )-> int: return sum(takewhile(lambda __SCREAMING_SNAKE_CASE : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"{solution() = }")
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" 
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
0
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class _snake_case : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_A : List[str] , **_A : Tuple): """simple docstring""" pass def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : List[Any] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : List[Any] = np.array(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = npimg.shape return {"hash": hashimage(__SCREAMING_SNAKE_CASE ), "shape": shape} @is_pipeline_test @require_vision @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _lowerCAmelCase ( self : int , _A : List[Any] , _A : Optional[int] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = MaskGenerationPipeline(model=_A , image_processor=_A) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : str): """simple docstring""" pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""") def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass @slow @require_torch def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" 
_SCREAMING_SNAKE_CASE : Optional[Any] = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""") _SCREAMING_SNAKE_CASE : List[Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_5_6) # Shortening by hashing _SCREAMING_SNAKE_CASE : Tuple = [] for i, o in enumerate(outputs["""masks"""]): new_outupt += [{"mask": mask_to_test_readable(_A), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_532}, {"""mask""": {"""hash""": 
"""32de6454a8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_871} ] , ) # fmt: on @require_torch @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """facebook/sam-vit-huge""" _SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""mask-generation""" , model=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_5_6) # Shortening by hashing _SCREAMING_SNAKE_CASE : Any = [] for i, o in enumerate(outputs["""masks"""]): new_outupt += [{"mask": 
mask_to_test_readable(_A), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_053}, ] , )
721
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , 
prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is 
inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , 
help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: 
_SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) 
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = '''bert-base-cased''' lowerCAmelCase_ = '''google/pegasus-xsum''' lowerCAmelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCAmelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase_ = '''sshleifer/bart-tiny-random''' lowerCAmelCase_ = '''sshleifer/tiny-mbart''' lowerCAmelCase_ = '''sshleifer/tiny-marian-en-de''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = """\n""".join(__SCREAMING_SNAKE_CASE ) Path(__SCREAMING_SNAKE_CASE ).open("""w""" ).writelines(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: for split in ["train", "val", "test"]: _dump_articles(os.path.join(__SCREAMING_SNAKE_CASE , F"""{split}.source""" ) , __SCREAMING_SNAKE_CASE ) _dump_articles(os.path.join(__SCREAMING_SNAKE_CASE , F"""{split}.target""" ) , __SCREAMING_SNAKE_CASE ) return tmp_dir class _snake_case ( __snake_case ): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) _SCREAMING_SNAKE_CASE : 
List[str] = max(len(tokenizer.encode(_A)) for a in ARTICLES) _SCREAMING_SNAKE_CASE : str = max(len(tokenizer.encode(_A)) for a in SUMMARIES) _SCREAMING_SNAKE_CASE : List[Any] = 4 _SCREAMING_SNAKE_CASE : Tuple = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _SCREAMING_SNAKE_CASE : int = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error. _SCREAMING_SNAKE_CASE : Optional[int] = SeqaSeqDataset( _A , data_dir=_A , type_path="""train""" , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(_A , _A) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _SCREAMING_SNAKE_CASE : Union[str, Any] = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def _lowerCAmelCase ( self : List[str] , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) _SCREAMING_SNAKE_CASE : Optional[int] = max(len(tokenizer.encode(_A)) for a in ARTICLES) _SCREAMING_SNAKE_CASE : List[Any] = max(len(tokenizer.encode(_A)) for a in 
SUMMARIES) _SCREAMING_SNAKE_CASE : Dict = 4 _SCREAMING_SNAKE_CASE : Dict = LegacySeqaSeqDataset( _A , data_dir=_A , type_path="""train""" , max_source_length=2_0 , max_target_length=_A , ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""") _SCREAMING_SNAKE_CASE : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) _SCREAMING_SNAKE_CASE : List[str] = tmp_dir.joinpath("""train.source""").open().readlines() _SCREAMING_SNAKE_CASE : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(_A , _A , 1_2_8 , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = {x.name for x in tmp_dir.iterdir()} _SCREAMING_SNAKE_CASE : Optional[Any] = {x.name for x in save_dir.iterdir()} _SCREAMING_SNAKE_CASE : Tuple = save_dir.joinpath("""train.source""").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_A) < len(_A) assert len(_A) == 1 assert len(packed_examples[0]) == sum(len(_A) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""") def _lowerCAmelCase ( self : Dict): """simple docstring""" if not FAIRSEQ_AVAILABLE: return _SCREAMING_SNAKE_CASE : str = self._get_dataset(max_len=6_4) _SCREAMING_SNAKE_CASE : Optional[int] = 6_4 
_SCREAMING_SNAKE_CASE : List[str] = ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = [len(_A) for x in batch_sampler] assert len(set(_A)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_A) == len(_A) # no dropped or added examples _SCREAMING_SNAKE_CASE : Any = DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2) _SCREAMING_SNAKE_CASE : str = [] _SCREAMING_SNAKE_CASE : Optional[Any] = [] for batch in data_loader: _SCREAMING_SNAKE_CASE : Optional[Any] = batch["""input_ids"""].shape _SCREAMING_SNAKE_CASE : Tuple = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _SCREAMING_SNAKE_CASE : Dict = np.product(batch["""input_ids"""].shape) num_src_per_batch.append(_A) if num_src_tokens > (max_tokens * 1.1): failures.append(_A) assert num_src_per_batch[0] == max(_A) if failures: raise AssertionError(f"""too many tokens in {len(_A)} batches""") def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_dataset(max_len=5_1_2) _SCREAMING_SNAKE_CASE : int = 2 _SCREAMING_SNAKE_CASE : Tuple = ds.make_sortish_sampler(_A , shuffle=_A) _SCREAMING_SNAKE_CASE : int = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A) _SCREAMING_SNAKE_CASE : Tuple = tokenizer.pad_token_id def count_pad_tokens(_A : Tuple , _A : Union[str, Any]="input_ids"): return [batch[k].eq(_A).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_A , k="""labels""")) < sum(count_pad_tokens(_A , k="""labels""")) assert sum(count_pad_tokens(_A)) < sum(count_pad_tokens(_A)) assert len(_A) == len(_A) def _lowerCAmelCase ( self : List[str] , _A : List[Any]=1_0_0_0 , _A : List[str]=1_2_8): """simple docstring""" if os.getenv("""USE_REAL_DATA""" , _A): 
_SCREAMING_SNAKE_CASE : Optional[int] = """examples/seq2seq/wmt_en_ro""" _SCREAMING_SNAKE_CASE : str = max_len * 2 * 6_4 if not Path(_A).joinpath("""train.len""").exists(): save_len_file(_A , _A) else: _SCREAMING_SNAKE_CASE : Union[str, Any] = """examples/seq2seq/test_data/wmt_en_ro""" _SCREAMING_SNAKE_CASE : List[Any] = max_len * 4 save_len_file(_A , _A) _SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : List[str] = SeqaSeqDataset( _A , data_dir=_A , type_path="""train""" , max_source_length=_A , max_target_length=_A , n_obs=_A , ) return ds, max_tokens, tokenizer def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self._get_dataset() _SCREAMING_SNAKE_CASE : List[str] = set(DistributedSortishSampler(_A , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_A)) _SCREAMING_SNAKE_CASE : List[Any] = set(DistributedSortishSampler(_A , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_A)) assert idsa.intersection(_A) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def _lowerCAmelCase ( self : Tuple , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(_A , use_fast=_A) if tok_name == MBART_TINY: _SCREAMING_SNAKE_CASE : str = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , ) _SCREAMING_SNAKE_CASE : int = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _SCREAMING_SNAKE_CASE : Any = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="""train""" , max_source_length=4 , max_target_length=8 , ) _SCREAMING_SNAKE_CASE : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in 
kwargs assert len(_A) == 1 if tok_name == BART_TINY else len(_A) == 0
700
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? 
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , 
__SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , 
device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , 
default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) 
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
0
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "first_exhausted" , )-> DatasetType: from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("""Unable to interleave an empty list of datasets.""" ) for i, dataset in enumerate(__SCREAMING_SNAKE_CASE ): if not isinstance(__SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ): if isinstance(__SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ """is an empty dataset dictionary.""" ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(__SCREAMING_SNAKE_CASE )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__SCREAMING_SNAKE_CASE ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__SCREAMING_SNAKE_CASE ).__name__}.""" ) if i == 0: _SCREAMING_SNAKE_CASE : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset) ) elif not isinstance(__SCREAMING_SNAKE_CASE 
, __SCREAMING_SNAKE_CASE ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , stopping_strategy=__SCREAMING_SNAKE_CASE ) else: return _interleave_iterable_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , stopping_strategy=__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , )-> DatasetType: if not dsets: raise ValueError("""Unable to concatenate an empty list of datasets.""" ) for i, dataset in enumerate(__SCREAMING_SNAKE_CASE ): if not isinstance(__SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ): if isinstance(__SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ """is an empty dataset dictionary.""" ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(__SCREAMING_SNAKE_CASE )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__SCREAMING_SNAKE_CASE ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__SCREAMING_SNAKE_CASE ).__name__}.""" ) if i == 0: _SCREAMING_SNAKE_CASE : List[str] = ( (Dataset, IterableDataset) if 
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset) ) elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , axis=__SCREAMING_SNAKE_CASE ) else: return _concatenate_iterable_datasets(__SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , axis=__SCREAMING_SNAKE_CASE )
701
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 100 )-> int: _SCREAMING_SNAKE_CASE : Any = set() _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = n + 1 # maximum limit for a in range(2 , __SCREAMING_SNAKE_CASE ): for b in range(2 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = a**b # calculates the current power collect_powers.add(__SCREAMING_SNAKE_CASE ) # adds the result to the set return len(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
702
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def 
lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = 
fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = 
AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
0
# Lazy-import module definition for the GPT-BigCode model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling classes are only exposed when torch is installed.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups 
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def 
_lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
0
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests lowerCAmelCase_ = '''https://api.github.com''' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user lowerCAmelCase_ = BASE_URL + '''/user''' # https://github.com/settings/tokens lowerCAmelCase_ = os.environ.get('''USER_TOKEN''', '''''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> dict[Any, Any]: _SCREAMING_SNAKE_CASE : int = { """Authorization""": F"""token {auth_token}""", """Accept""": """application/vnd.github.v3+json""", } return requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F"{key}: {value}") else: raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
"""Processor that pairs an image processor with a tokenizer (Donut-style).

NOTE(review): this file is obfuscation-damaged — every assignment target was
renamed to `_SCREAMING_SNAKE_CASE` (so later reads like `kwargs`, `images`,
`text`, `output` reference names never bound), all three class attributes are
assigned to `a`, `__init__` repeats the parameter `_A` (a SyntaxError), and
every method shares the name `_lowerCAmelCase` (later defs shadow earlier
ones). Comments below describe the evident intent; code is left byte-identical.
"""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class _snake_case(__snake_case):
    """Wraps an image processor and a tokenizer behind one `__call__`."""

    # Attribute triple presumably was: attributes, image_processor_class,
    # tokenizer_class — all three assigned to `a` here (damage); confirm.
    a = ["image_processor", "tokenizer"]
    a = "AutoImageProcessor"
    a = "AutoTokenizer"

    def __init__(self : int, _A : str=None, _A : Tuple=None, **_A : Optional[Any]):
        """Accept image_processor/tokenizer; `feature_extractor` kwarg is a deprecated alias."""
        _SCREAMING_SNAKE_CASE : int = None
        # Back-compat path: honour the deprecated `feature_extractor` kwarg.
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                _A,
            )
            _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""feature_extractor""")
        _SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(_A, _A)
        # Default "current processor" used inside as_target_processor.
        _SCREAMING_SNAKE_CASE : str = self.image_processor
        _SCREAMING_SNAKE_CASE : int = False

    def __call__(self : Optional[int], *_A : Optional[int], **_A : Optional[int]):
        """Route images to the image processor and text to the tokenizer."""
        # Inside the (deprecated) target context manager, delegate wholesale.
        if self._in_target_context_manager:
            return self.current_processor(*_A, **_A)
        _SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""images""", _A)
        _SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""text""", _A)
        # Positional fallback: first arg is images, the rest are forwarded.
        if len(_A) > 0:
            _SCREAMING_SNAKE_CASE : Dict = args[0]
            _SCREAMING_SNAKE_CASE : Any = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""")
        if images is not None:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(_A, *_A, **_A)
        if text is not None:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(_A, **_A)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both given: attach the text ids as decoder labels on the image batch.
            _SCREAMING_SNAKE_CASE : Union[str, Any] = encodings["""input_ids"""]
            return inputs

    def _lowerCAmelCase(self : int, *_A : Tuple, **_A : Dict):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*_A, **_A)

    def _lowerCAmelCase(self : Tuple, *_A : Optional[Any], **_A : Any):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*_A, **_A)

    @contextmanager
    def _lowerCAmelCase(self : Union[str, Any]):
        """Deprecated: temporarily make the tokenizer the active processor."""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call."""
        )
        _SCREAMING_SNAKE_CASE : str = True
        _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer
        yield
        # Restore the image processor once the with-block exits.
        _SCREAMING_SNAKE_CASE : Dict = self.image_processor
        _SCREAMING_SNAKE_CASE : Dict = False

    def _lowerCAmelCase(self : List[Any], _A : str, _A : Any=False, _A : List[str]=None):
        """Parse a `<s_key>...</s_key>` token sequence into nested JSON (Donut token2json)."""
        if added_vocab is None:
            _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.get_added_vocab()
        _SCREAMING_SNAKE_CASE : Optional[int] = {}
        while tokens:
            # Find the next opening tag <s_key>.
            _SCREAMING_SNAKE_CASE : Union[str, Any] = re.search(r"""<s_(.*?)>""", _A, re.IGNORECASE)
            if start_token is None:
                break
            _SCREAMING_SNAKE_CASE : Union[str, Any] = start_token.group(1)
            _SCREAMING_SNAKE_CASE : Tuple = re.search(rf"""</s_{key}>""", _A, re.IGNORECASE)
            _SCREAMING_SNAKE_CASE : int = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and keep scanning.
                _SCREAMING_SNAKE_CASE : str = tokens.replace(_A, """""")
            else:
                _SCREAMING_SNAKE_CASE : Any = end_token.group()
                _SCREAMING_SNAKE_CASE : List[str] = re.escape(_A)
                _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(_A)
                # Grab everything between the matched open/close tags.
                _SCREAMING_SNAKE_CASE : List[Any] = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""", _A, re.IGNORECASE)
                if content is not None:
                    _SCREAMING_SNAKE_CASE : str = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        # Nested tags: recurse into the content.
                        _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenajson(_A, is_inner_value=_A, added_vocab=_A)
                        if value:
                            if len(_A) == 1:
                                _SCREAMING_SNAKE_CASE : Optional[Any] = value[0]
                            _SCREAMING_SNAKE_CASE : Optional[int] = value
                    else:  # leaf nodes
                        _SCREAMING_SNAKE_CASE : int = []
                        for leaf in content.split(r"""<sep/>"""):
                            _SCREAMING_SNAKE_CASE : Optional[int] = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                _SCREAMING_SNAKE_CASE : List[str] = leaf[1:-2]  # for categorical special tokens
                            output[key].append(_A)
                        if len(output[key]) == 1:
                            _SCREAMING_SNAKE_CASE : str = output[key][0]
                # Advance past the consumed closing tag.
                _SCREAMING_SNAKE_CASE : Union[str, Any] = tokens[tokens.find(_A) + len(_A) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=_A, added_vocab=_A)
        if len(_A):
            return [output] if is_inner_value else output
        else:
            # Nothing parsed: return the raw remainder as plain text.
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def _lowerCAmelCase(self : List[str]):
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            _A,
        )
        return self.image_processor_class

    @property
    def _lowerCAmelCase(self : List[Any]):
        """Deprecated alias for image_processor."""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            _A,
        )
        return self.image_processor
705
"""Pytest suite for the datasets Parquet reader/writer.

NOTE(review): obfuscation damage — every `def lowerCamelCase_` repeats the
parameter `__SCREAMING_SNAKE_CASE` (a SyntaxError), later defs shadow earlier
ones, and bodies read fixture names (`tmp_path`, `parquet_path`, `features`,
`split`, ...) that were never bound because assignment targets were renamed.
Code is left byte-identical; comments describe the evident intent.
"""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Shared shape check for a single Dataset read from Parquet (4 rows, 3 cols).
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Optional[int]:
    assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reader honours keep_in_memory (arrow memory grows only when True).
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Optional[int]:
    _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE, keep_in_memory=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_dataset(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)


# Reader applies an explicit Features schema (or infers when None).
@pytest.mark.parametrize(
    """features""",
    [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ],
)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Tuple:
    _SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE : List[Any] = (
        Features({feature: Value(__SCREAMING_SNAKE_CASE) for feature, dtype in features.items()}) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE, features=__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_dataset(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)


# Reader records the requested split name (defaults to "train").
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Tuple:
    _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE, split=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_dataset(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
    assert dataset.split == split if split else "train"


# Reader accepts both a single path and a list of paths.
@pytest.mark.parametrize("""path_type""", [str, list])
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> str:
    if issubclass(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
        _SCREAMING_SNAKE_CASE : Any = parquet_path
    elif issubclass(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
        _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
    _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_dataset(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)


# Shared shape check for a DatasetDict read from Parquet.
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=("train",)) -> Union[str, Any]:
    assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
    for split in splits:
        _SCREAMING_SNAKE_CASE : int = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# DatasetDict variant of the keep_in_memory test.
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Optional[int]:
    _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
            {"""train""": parquet_path}, cache_dir=__SCREAMING_SNAKE_CASE, keep_in_memory=__SCREAMING_SNAKE_CASE
        ).read()
    _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)


# DatasetDict variant of the explicit-features test.
@pytest.mark.parametrize(
    """features""",
    [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ],
)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Dict:
    _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE : str = (
        Features({feature: Value(__SCREAMING_SNAKE_CASE) for feature, dtype in features.items()}) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path}, features=__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)


# DatasetDict variant: split names round-trip through the reader.
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Dict:
    if split:
        _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path}
    else:
        _SCREAMING_SNAKE_CASE : Optional[int] = """train"""
        _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path}
    _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE, cache_dir=__SCREAMING_SNAKE_CASE).read()
    _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


# Writer round-trip: written file's table equals the dataset's table.
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> List[Any]:
    _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE, tmp_path / """foo.parquet""")
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""")
    _SCREAMING_SNAKE_CASE : str = pf.read()
    assert dataset.data.table == output_table


# Image features survive a write/read round-trip (eager and streaming).
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> Union[str, Any]:
    _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""")
    _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]}
    _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()})
    _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE, features=__SCREAMING_SNAKE_CASE)
    _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE, tmp_path / """foo.parquet""")
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet"""))
    assert dataset.features == reloaded_dataset.features
    _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet"""), streaming=__SCREAMING_SNAKE_CASE).read()
    assert dataset.features == reloaded_iterable_dataset.features


# Row-group size is shrunk for image/audio features, None otherwise.
@pytest.mark.parametrize(
    """feature, expected""",
    [
        (Features({"""foo""": Value("""int32""")}), None),
        (Features({"""image""": Image(), """foo""": Value("""int32""")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"""nested""": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) -> int:
    assert get_writer_batch_size(__SCREAMING_SNAKE_CASE) == expected
635
0
def lowerCamelCase_(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid via Newton–Laplace: c = sqrt(K / rho).

    Fix: the original declared the parameter ``__SCREAMING_SNAKE_CASE`` twice
    (a SyntaxError); the two parameters are restored as density then bulk
    modulus, matching the order the body checks them in.

    :param density: fluid density rho (kg/m^3); must be positive.
    :param bulk_modulus: adiabatic bulk modulus K (Pa); must be positive.
    :raises ValueError: if either quantity is non-positive.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
706
def lowerCamelCase_(__SCREAMING_SNAKE_CASE) -> int:
    """Return the largest value obtainable by deleting exactly one digit.

    The sign is discarded via ``abs``. Fixes: the original built candidates
    with ``list(<raw int>)`` (TypeError), popped with the integer argument as
    the index, and joined the wrong variable — all obfuscation damage; the
    candidates are now the digit string with each position removed in turn.

    :param __SCREAMING_SNAKE_CASE: the integer to strip one digit from.
    :raises TypeError: if the input is not an ``int``.
    :raises ValueError: for single-digit input (removing its only digit
        leaves the empty string, as in the original algorithm's domain).
    """
    if not isinstance(__SCREAMING_SNAKE_CASE, int):
        raise TypeError("only integers accepted as input")
    # Digit string of the absolute value.
    digits = str(abs(__SCREAMING_SNAKE_CASE))
    # One candidate per digit position, each with that position removed.
    candidates = [list(digits) for _ in range(len(digits))]
    for index in range(len(digits)):
        candidates[index].pop(index)
    return max(int("".join(candidate)) for candidate in candidates)


if __name__ == "__main__":
    __import__("doctest").testmod()
635
0
def lowerCamelCase_(collection):
    """Sort ``collection`` in place with selection sort and return it.

    Fixes (obfuscation damage): the inner loop ran ``range(i + 1, <list>)``
    instead of using the length (TypeError), the swap assigned the tuple to a
    throwaway local instead of back into the list, and the body read the names
    ``collection``/``length`` that were never bound. The parameter is restored
    to ``collection`` so all reads resolve.

    :param collection: a mutable sequence of mutually comparable items.
    :return: the same sequence, sorted ascending.
    """
    length = len(collection)
    for i in range(length - 1):
        # Index of the smallest element in collection[i:].
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # Swap the minimum into position i.
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowerCamelCase_(unsorted))
707
"""Integration tests for TextStreamer / TextIteratorStreamer generation output.

NOTE(review): obfuscation damage throughout — every local assignment targets
`_SCREAMING_SNAKE_CASE` while later reads use the original names (`tokenizer`,
`model`, `input_ids`, `greedy_ids`, `streamer`, ...), and all test methods
share the name `_lowerCAmelCase` (later defs shadow earlier ones). Code left
byte-identical; comments record the evident intent of each test.
"""
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class _snake_case(unittest.TestCase):
    """Checks that streamed generation text matches non-streamed decoding."""

    def _lowerCAmelCase(self : List[Any]):
        """TextStreamer printing must match a plain greedy decode."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
        _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
        _SCREAMING_SNAKE_CASE : Any = -1
        _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(_A)
        _SCREAMING_SNAKE_CASE : Dict = model.generate(_A, max_new_tokens=1_0, do_sample=_A)
        _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
            model.generate(_A, max_new_tokens=1_0, do_sample=_A, streamer=_A)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        _SCREAMING_SNAKE_CASE : str = cs.out[:-1]
        self.assertEqual(_A, _A)

    def _lowerCAmelCase(self : Any):
        """TextIteratorStreamer consumed from another thread must yield the same text."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
        _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
        _SCREAMING_SNAKE_CASE : List[Any] = -1
        _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(_A)
        _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A, max_new_tokens=1_0, do_sample=_A)
        _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
        _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
        _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
        # Generation runs on a worker thread while the main thread drains the streamer.
        _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate, kwargs=_A)
        thread.start()
        _SCREAMING_SNAKE_CASE : Any = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(_A, _A)

    def _lowerCAmelCase(self : List[Any]):
        """skip_prompt=True must print only the newly generated tokens."""
        _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
        _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
        _SCREAMING_SNAKE_CASE : Any = -1
        _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(_A)
        _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A, max_new_tokens=1_0, do_sample=_A)
        # Strip the prompt prefix; the streamer should emit only the suffix.
        _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
        _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A, skip_prompt=_A)
            model.generate(_A, max_new_tokens=1_0, do_sample=_A, streamer=_A)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
        self.assertEqual(_A, _A)

    def _lowerCAmelCase(self : Dict):
        """skip_special_tokens must suppress the BOS-only prompt from the stream."""
        _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
        _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
        _SCREAMING_SNAKE_CASE : int = -1
        _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5), device=_A).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A, skip_special_tokens=_A)
            model.generate(_A, max_new_tokens=1, do_sample=_A, streamer=_A)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1]  # Remove the final "\n"
        _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A, return_tensors="""pt""")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def _lowerCAmelCase(self : str):
        """A tiny timeout on the iterator streamer must raise while draining."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
        _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
        _SCREAMING_SNAKE_CASE : Tuple = -1
        _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(_A)
        _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A, timeout=0.001)
        _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
        _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate, kwargs=_A)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_A):
            _SCREAMING_SNAKE_CASE : str = """"""
            for new_text in streamer:
                streamer_text += new_text
635
0
"""Fast (tokenizers-backed) tokenizer for MVP, derived from the GPT-2 BPE stack.

NOTE(review): obfuscation damage — the module constants read inside the class
(`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`, ...) were all assigned to
`lowerCAmelCase_`; `__init__` repeats the parameter `_A` (a SyntaxError); the
`@mask_token.setter` decorator references a `mask_token` property that no
longer exists (the property was renamed `_lowerCAmelCase`), which would raise
NameError during class creation; and all methods share `_lowerCAmelCase`.
Code left byte-identical; comments record the evident intent.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer

lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCAmelCase_ = {
    '''vocab_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
    },
    '''added_tokens.json''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
    },
    '''merges_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
    },
}

lowerCAmelCase_ = {
    '''RUCAIBox/mvp''': 1024,
}


class _snake_case(__snake_case):
    """Fast MVP tokenizer; slow counterpart is MvpTokenizer."""

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["input_ids", "attention_mask"]
    a = MvpTokenizer

    def __init__(self : Optional[Any], _A : List[str]=None, _A : Optional[int]=None, _A : Optional[Any]=None, _A : Optional[int]="replace", _A : Optional[Any]="<s>", _A : Optional[Any]="</s>", _A : str="</s>", _A : Union[str, Any]="<s>", _A : Union[str, Any]="<unk>", _A : Any="<pad>", _A : Optional[Any]="<mask>", _A : int=False, _A : Optional[int]=True, **_A : Optional[Any], ):
        """Build the fast tokenizer and sync add_prefix_space/trim_offsets into the backend."""
        super().__init__(
            _A,
            _A,
            tokenizer_file=_A,
            errors=_A,
            bos_token=_A,
            eos_token=_A,
            sep_token=_A,
            cls_token=_A,
            unk_token=_A,
            pad_token=_A,
            mask_token=_A,
            add_prefix_space=_A,
            trim_offsets=_A,
            **_A,
        )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees.
        _SCREAMING_SNAKE_CASE : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", _A) != add_prefix_space:
            _SCREAMING_SNAKE_CASE : List[Any] = getattr(_A, pre_tok_state.pop("""type"""))
            _SCREAMING_SNAKE_CASE : str = add_prefix_space
            _SCREAMING_SNAKE_CASE : int = pre_tok_class(**_A)
        _SCREAMING_SNAKE_CASE : str = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        _SCREAMING_SNAKE_CASE : Tuple = """post_processor"""
        _SCREAMING_SNAKE_CASE : Optional[Any] = getattr(self.backend_tokenizer, _A, _A)
        if tokenizer_component_instance:
            _SCREAMING_SNAKE_CASE : Dict = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _SCREAMING_SNAKE_CASE : Any = tuple(state["""sep"""])
            if "cls" in state:
                _SCREAMING_SNAKE_CASE : str = tuple(state["""cls"""])
            _SCREAMING_SNAKE_CASE : Any = False
            if state.get("""add_prefix_space""", _A) != add_prefix_space:
                _SCREAMING_SNAKE_CASE : str = add_prefix_space
                _SCREAMING_SNAKE_CASE : Dict = True
            if state.get("""trim_offsets""", _A) != trim_offsets:
                _SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
                _SCREAMING_SNAKE_CASE : Tuple = True
            if changes_to_apply:
                # Rebuild the post-processor with the corrected state.
                _SCREAMING_SNAKE_CASE : str = getattr(_A, state.pop("""type"""))
                _SCREAMING_SNAKE_CASE : int = component_class(**_A)
                setattr(self.backend_tokenizer, _A, _A)

    @property
    def _lowerCAmelCase(self : int):
        """Mask-token getter; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def _lowerCAmelCase(self : Dict, _A : Dict):
        """Mask-token setter; wraps plain strings as a lstrip-ing AddedToken."""
        _SCREAMING_SNAKE_CASE : str = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else value
        _SCREAMING_SNAKE_CASE : Any = value

    def _lowerCAmelCase(self : List[str], *_A : Dict, **_A : int):
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        _SCREAMING_SNAKE_CASE : Tuple = kwargs.get("""is_split_into_words""", _A)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )
        return super()._batch_encode_plus(*_A, **_A)

    def _lowerCAmelCase(self : str, *_A : Optional[Any], **_A : Tuple):
        """Single encode; same add_prefix_space guard as the batch path."""
        _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.get("""is_split_into_words""", _A)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )
        return super()._encode_plus(*_A, **_A)

    def _lowerCAmelCase(self : Optional[Any], _A : str, _A : Optional[str] = None):
        """Save the backend model's vocabulary files; returns the written paths."""
        _SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(_A, name=_A)
        return tuple(_A)

    def _lowerCAmelCase(self : Any, _A : List[str], _A : List[str]=None):
        """Wrap sequences as <s> A </s> (</s> B </s> when a pair is given)."""
        _SCREAMING_SNAKE_CASE : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _lowerCAmelCase(self : Union[str, Any], _A : List[int], _A : Optional[List[int]] = None):
        """Token-type ids: all zeros for MVP, pair or single."""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
        _SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
708
"""Zero-shot text-classification agent tool backed by bart-large-mnli.

NOTE(review): obfuscation damage — the class attributes were all renamed `a`
(presumably default_checkpoint, description, name, pre_processor_class,
model_class, inputs, outputs — confirm against the tools base class), setup's
locals were renamed so `config`/`idx` are read unbound, and the three methods
(setup / encode / decode, by signature) all share the name `_lowerCAmelCase`.
Code left byte-identical.
"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class _snake_case(__snake_case):
    """Classifies English text against a caller-supplied label list (NLI-based)."""

    a = "facebook/bart-large-mnli"
    a = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    a = "text_classifier"
    a = AutoTokenizer
    a = AutoModelForSequenceClassification
    a = ["text", ["text"]]
    a = ["text"]

    def _lowerCAmelCase(self : int):
        """Locate the model's 'entailment' label id from its id->label map."""
        super().setup()
        _SCREAMING_SNAKE_CASE : Any = self.model.config
        _SCREAMING_SNAKE_CASE : Any = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith("""entail"""):
                _SCREAMING_SNAKE_CASE : List[Any] = int(_A)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")

    def _lowerCAmelCase(self : Optional[Any], _A : Tuple, _A : List[str]):
        """Tokenize (text, hypothesis) pairs — one 'This example is {label}' per label."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = labels
        return self.pre_processor(
            [text] * len(_A),
            [f"""This example is {label}""" for label in labels],
            return_tensors="""pt""",
            padding="""max_length""",
        )

    def _lowerCAmelCase(self : Tuple, _A : Optional[Any]):
        """Pick the label whose pair scored highest on logits column 2."""
        # NOTE(review): column 2 is presumably the entailment logit — confirm
        # against the setup() scan above, which computes entailment_id.
        _SCREAMING_SNAKE_CASE : str = outputs.logits
        _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
635
0
"""Fixed-priority and element-priority queues with a console demo.

Fix: the obfuscated original collapsed all four classes to ``_snake_case``
(each definition shadowing the previous), both demo functions to
``lowerCamelCase_``, and renamed ``self.queues``/``self.queue`` assignments
to locals — while the ``__main__`` guard and bodies still referenced
``FixedPriorityQueue``, ``ElementPriorityQueue``, ``UnderFlowError``,
``fixed_priority_queue`` and ``element_priority_queue``. The names the code
itself calls are restored; all messages and demo sequences are unchanged.
"""


class OverFlowError(Exception):
    """Raised when ElementPriorityQueue exceeds its 100-item capacity."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""


class FixedPriorityQueue:
    """FIFO queue with three priority levels; priority 0 is served first.

    Each level holds at most 100 items.
    """

    def __init__(self):
        # One FIFO list per priority level 0..2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append ``data`` to the level ``priority`` (0, 1 or 2).

        :raises OverflowError: if that level already holds 100 items
            (built-in OverflowError, as in the original).
        :raises ValueError: if ``priority`` is outside 0..2.
        """
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Pop the oldest item from the highest non-empty priority level.

        :raises UnderFlowError: if every level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue that always dequeues its smallest element (min-priority)."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append ``data``; raises OverFlowError at 100 items."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the minimum element.

        :raises UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    """Console demo for FixedPriorityQueue.

    NOTE: nine items are enqueued but ten dequeues are attempted, so the
    final call raises UnderFlowError — preserved from the original demo.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    """Console demo for ElementPriorityQueue (same nine-in/ten-out pattern)."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
709
"""Slow integration test for DiT image classification on RVL-CDIP.

NOTE(review): obfuscation damage — locals are assigned to
`_SCREAMING_SNAKE_CASE` while later reads use the original names
(`image_processor`, `model`, `dataset`, `outputs`, `logits`). Code left
byte-identical; comments record the evident intent.
"""
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class _snake_case(unittest.TestCase):
    """Checks DiT logits shape and a reference slice on one RVL-CDIP image."""

    @slow
    def _lowerCAmelCase(self : str):
        """Forward one document image and compare logits against known values."""
        _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(_A)
        from datasets import load_dataset

        _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""")
        _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""")
        _SCREAMING_SNAKE_CASE : str = image_processor(_A, return_tensors="""pt""").to(_A)
        # forward pass
        with torch.no_grad():
            _SCREAMING_SNAKE_CASE : Any = model(**_A)
        _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
        # RVL-CDIP has 16 document classes.
        _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6))
        self.assertEqual(logits.shape, _A)
        _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347],
            device=_A,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], _A, atol=1e-4))
635
0
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Any: if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.Iterable ): return x return (x, x) @require_flax class _snake_case : """simple docstring""" def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : str): """simple docstring""" pass def _lowerCAmelCase ( self : List[Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass def _lowerCAmelCase ( self : int , _A : np.ndarray , _A : np.ndarray , _A : float): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = np.abs((a - b)).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""") def _lowerCAmelCase ( self : Any , _A : List[str] , _A : Dict , _A : str , _A : List[Any] , _A : Dict=None , **_A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) 
_SCREAMING_SNAKE_CASE : str = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : str = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim)) def _lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : str , _A : Optional[Any] , _A : Dict , _A : Optional[Any]=None , **_A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : Dict = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim)) def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : List[Any] , _A : Dict , _A : List[str] , _A : int=None , **_A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : int = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) _SCREAMING_SNAKE_CASE : List[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) _SCREAMING_SNAKE_CASE : str = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_A) _SCREAMING_SNAKE_CASE : str = model(input_ids=_A , pixel_values=_A , attention_mask=_A) _SCREAMING_SNAKE_CASE : List[Any] = after_output[0] _SCREAMING_SNAKE_CASE : Optional[int] = np.amax(np.abs(out_a - out_a)) 
self.assertLessEqual(_A , 1e-3) def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None , **_A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : str = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) _SCREAMING_SNAKE_CASE : List[Any] = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A) _SCREAMING_SNAKE_CASE : Dict = output.vision_model_output.attentions self.assertEqual(len(_A) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _SCREAMING_SNAKE_CASE : Tuple = to_atuple(vision_model.config.image_size) _SCREAMING_SNAKE_CASE : List[str] = to_atuple(vision_model.config.patch_size) _SCREAMING_SNAKE_CASE : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) _SCREAMING_SNAKE_CASE : Dict = output.text_model_output.attentions self.assertEqual(len(_A) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[Any] , _A : Tuple): """simple docstring""" pt_model.to(_A) pt_model.eval() # prepare inputs _SCREAMING_SNAKE_CASE : Any = inputs_dict _SCREAMING_SNAKE_CASE : Dict = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = pt_model(**_A).to_tuple() _SCREAMING_SNAKE_CASE : Dict = fx_model(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between 
Flax and PyTorch""") for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : int = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A) _SCREAMING_SNAKE_CASE : Dict = fx_model_loaded(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Any = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A) pt_model_loaded.to(_A) pt_model_loaded.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[Any] = pt_model_loaded(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]): self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2) def _lowerCAmelCase ( self : List[Any] , _A : int , _A : List[Any] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) _SCREAMING_SNAKE_CASE : Any = VisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A) _SCREAMING_SNAKE_CASE : List[Any] = fx_state self.check_pt_flax_equivalence(_A , _A , _A) def _lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = VisionTextDualEncoderModel(_A) 
_SCREAMING_SNAKE_CASE : List[Any] = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Any = load_flax_weights_in_pytorch_model(_A , fx_model.params) self.check_pt_flax_equivalence(_A , _A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() self.check_save_load(**_A) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A) @is_pt_flax_cross_test def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[int] = config_inputs_dict.pop("""vision_config""") _SCREAMING_SNAKE_CASE : Optional[int] = config_inputs_dict.pop("""text_config""") _SCREAMING_SNAKE_CASE : Union[str, Any] = config_inputs_dict self.check_equivalence_pt_to_flax(_A , _A , _A) self.check_equivalence_flax_to_pt(_A , _A , _A) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.get_pretrained_model_and_inputs() _SCREAMING_SNAKE_CASE : List[Any] = model_a(**_A) _SCREAMING_SNAKE_CASE : Optional[int] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = model_a(**_A) _SCREAMING_SNAKE_CASE : List[Any] = after_outputs[0] _SCREAMING_SNAKE_CASE : Tuple = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(_A 
, 1e-5) @require_flax class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_A , text_from_pt=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = 1_3 _SCREAMING_SNAKE_CASE : Dict = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ]) _SCREAMING_SNAKE_CASE : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) _SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([batch_size, 4]) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxViTModel(_A) _SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel(_A) return vision_model, text_model def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = FlaxViTModelTester(self) _SCREAMING_SNAKE_CASE : List[Any] = FlaxBertModelTester(self) _SCREAMING_SNAKE_CASE : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[Any] = bert_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Dict = vision_config_and_inputs _SCREAMING_SNAKE_CASE : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _snake_case ( __snake_case , unittest.TestCase ): """simple 
docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_A , text_from_pt=_A , ) _SCREAMING_SNAKE_CASE : Any = 1_3 _SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ]) _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) _SCREAMING_SNAKE_CASE : Dict = random_attention_mask([batch_size, 4]) _SCREAMING_SNAKE_CASE : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Any , _A : Optional[Any] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FlaxCLIPVisionModel(_A) _SCREAMING_SNAKE_CASE : Dict = FlaxBertModel(_A) return vision_model, text_model def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxCLIPVisionModelTester(self) _SCREAMING_SNAKE_CASE : int = FlaxBertModelTester(self) _SCREAMING_SNAKE_CASE : Any = clip_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : str = bert_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[Any] = vision_config_and_inputs _SCREAMING_SNAKE_CASE : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : 
Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0) _SCREAMING_SNAKE_CASE : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""") _SCREAMING_SNAKE_CASE : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") _SCREAMING_SNAKE_CASE : Any = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_A , padding=_A , return_tensors="""np""") _SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_A) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _SCREAMING_SNAKE_CASE : Dict = np.array([[1.2_284_727, 0.3_104_122]]) self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3))
710
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
0
import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class _snake_case ( __snake_case , __snake_case ): """simple docstring""" a = 1 @register_to_config def __init__( self : Union[str, Any] , _A : Optional[int]=2_0_0_0 , _A : Any=0.1 , _A : Optional[Any]=2_0 , _A : Optional[Any]=1e-3): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : Any = None def _lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Union[str, torch.device] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = torch.linspace(1 , self.config.sampling_eps , _A , device=_A) def _lowerCAmelCase ( self : int , _A : List[str] , _A : List[Any] , _A : Union[str, Any] , _A : Any=None): """simple docstring""" if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""") # TODO(Patrick) better comments + non-PyTorch # postprocess model score _SCREAMING_SNAKE_CASE : Optional[Any] = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _SCREAMING_SNAKE_CASE : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) _SCREAMING_SNAKE_CASE : List[str] = std.flatten() while len(std.shape) < len(score.shape): _SCREAMING_SNAKE_CASE : Tuple = std.unsqueeze(-1) _SCREAMING_SNAKE_CASE : int = -score / std # compute _SCREAMING_SNAKE_CASE : Optional[int] = -1.0 / len(self.timesteps) _SCREAMING_SNAKE_CASE : List[str] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _SCREAMING_SNAKE_CASE : int = beta_t.flatten() while len(beta_t.shape) < len(x.shape): _SCREAMING_SNAKE_CASE : Union[str, Any] = beta_t.unsqueeze(-1) _SCREAMING_SNAKE_CASE : List[Any] = -0.5 * beta_t * x _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.sqrt(_A) 
_SCREAMING_SNAKE_CASE : int = drift - diffusion**2 * score _SCREAMING_SNAKE_CASE : Any = x + drift * dt # add noise _SCREAMING_SNAKE_CASE : List[str] = randn_tensor(x.shape , layout=x.layout , generator=_A , device=x.device , dtype=x.dtype) _SCREAMING_SNAKE_CASE : Optional[Any] = x_mean + diffusion * math.sqrt(-dt) * noise return x, x_mean def __len__( self : int): """simple docstring""" return self.config.num_train_timesteps
711
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
0
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase_ = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _snake_case : """simple docstring""" a = PegasusConfig a = {} a = "gelu" def __init__( self : List[str] , _A : str , _A : str=1_3 , _A : Dict=7 , _A : Tuple=True , _A : Tuple=False , _A : Any=9_9 , _A : Union[str, Any]=3_2 , _A : Dict=5 , _A : List[str]=4 , _A : Union[str, Any]=3_7 , _A : int=0.1 , _A : Union[str, Any]=0.1 , _A : Any=2_0 , _A : List[str]=2 , _A : Optional[int]=1 , _A : Optional[Any]=0 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = parent _SCREAMING_SNAKE_CASE : int = batch_size _SCREAMING_SNAKE_CASE : List[Any] = seq_length _SCREAMING_SNAKE_CASE : Tuple = is_training _SCREAMING_SNAKE_CASE : Tuple = use_labels _SCREAMING_SNAKE_CASE : Dict = vocab_size _SCREAMING_SNAKE_CASE : Tuple = hidden_size _SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size _SCREAMING_SNAKE_CASE : int = hidden_dropout_prob _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Tuple = eos_token_id _SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id _SCREAMING_SNAKE_CASE : Any = bos_token_id def 
_lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) _SCREAMING_SNAKE_CASE : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1) _SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _SCREAMING_SNAKE_CASE : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _SCREAMING_SNAKE_CASE : Any = prepare_pegasus_inputs_dict(_A , _A , _A) return config, inputs_dict def _lowerCAmelCase ( self : Dict , _A : Optional[int] , _A : int , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = 2_0 _SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(inputs_dict["""input_ids"""]) _SCREAMING_SNAKE_CASE : Optional[int] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _SCREAMING_SNAKE_CASE : Dict = model.init_cache(decoder_input_ids.shape[0] , _A , _A) _SCREAMING_SNAKE_CASE : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""") _SCREAMING_SNAKE_CASE : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , 
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _SCREAMING_SNAKE_CASE : List[str] = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) _SCREAMING_SNAKE_CASE : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") _SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(_A , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") def _lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = 2_0 _SCREAMING_SNAKE_CASE : int = model_class_name(_A) _SCREAMING_SNAKE_CASE : Tuple = model.encode(inputs_dict["""input_ids"""]) _SCREAMING_SNAKE_CASE : Any = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _SCREAMING_SNAKE_CASE : Optional[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) _SCREAMING_SNAKE_CASE : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _SCREAMING_SNAKE_CASE : int = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) _SCREAMING_SNAKE_CASE : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") _SCREAMING_SNAKE_CASE : List[str] = model.decode( 
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) _SCREAMING_SNAKE_CASE : List[str] = model.decode(_A , _A , decoder_attention_mask=_A) _SCREAMING_SNAKE_CASE : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , )-> Tuple: if attention_mask is None: _SCREAMING_SNAKE_CASE : int = np.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () a = True a = False a = False a = False def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = FlaxPegasusModelTester(self) _SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A) def _lowerCAmelCase ( self : int): 
"""simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_A , _A) _SCREAMING_SNAKE_CASE : str = model_class(_A) @jax.jit def encode_jitted(_A : str , _A : Tuple=None , **_A : Any): return model.encode(input_ids=_A , attention_mask=_A) with self.subTest("""JIT Enabled"""): _SCREAMING_SNAKE_CASE : Any = encode_jitted(**_A).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): _SCREAMING_SNAKE_CASE : List[Any] = encode_jitted(**_A).to_tuple() self.assertEqual(len(_A) , len(_A)) for jitted_output, output in zip(_A , _A): self.assertEqual(jitted_output.shape , output.shape) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _SCREAMING_SNAKE_CASE : Any = model_class(_A) _SCREAMING_SNAKE_CASE : str = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""]) _SCREAMING_SNAKE_CASE : List[str] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(_A : Optional[Any] , _A : Optional[int] , _A : int): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest("""JIT Enabled"""): _SCREAMING_SNAKE_CASE : Dict = decode_jitted(**_A).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): 
_SCREAMING_SNAKE_CASE : List[Any] = decode_jitted(**_A).to_tuple() self.assertEqual(len(_A) , len(_A)) for jitted_output, output in zip(_A , _A): self.assertEqual(jitted_output.shape , output.shape) @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" for model_class_name in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=_A) _SCREAMING_SNAKE_CASE : Optional[int] = np.ones((1, 1)) _SCREAMING_SNAKE_CASE : List[str] = model(_A) self.assertIsNotNone(_A) @slow def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""") _SCREAMING_SNAKE_CASE : Any = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""") _SCREAMING_SNAKE_CASE : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _SCREAMING_SNAKE_CASE : Any = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""np""" , truncation=_A , max_length=5_1_2 , padding=_A) _SCREAMING_SNAKE_CASE : int = model.generate(**_A , num_beams=2).sequences _SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(_A , skip_special_tokens=_A) assert tgt_text == decoded
712
"""Convert TAPAS TensorFlow checkpoints to PyTorch models."""

import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TF TAPAS checkpoint into a PyTorch model saved at ``pytorch_dump_path``.

    Args:
        task: one of "SQA", "WTQ", "WIKISQL_SUPERVISED", "TABFACT", "MLM",
            "INTERMEDIATE_PRETRAINING" — selects the model head and the task hparams.
        reset_position_index_per_cell: whether to use relative position embeddings;
            set to False to convert a checkpoint that uses absolute position embeddings.
        tf_checkpoint_path: path to the TF checkpoint (the matching ``vocab.txt`` is
            assumed to live next to it).
        tapas_config_file: JSON config corresponding to the pre-trained model.
        pytorch_dump_path: output directory for model and tokenizer files.

    Raises:
        ValueError: for an unsupported ``task``.
    """
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # NOTE(review): the hparam attribute names below are restored from the upstream
    # HF conversion script — the mangled source had collapsed them to bare local
    # assignments. Verify against TapasConfig.
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")

    print(f"""Building PyTorch model from configuration: {config}""")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    # assumes the checkpoint filename ends in "model.ckpt" (10 chars) with vocab.txt
    # alongside it — TODO confirm for non-standard checkpoint names
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
635
0
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path of a model test file.

    Args:
        test_file: OS path of the form ``tests/models/<model>/test_modeling_*.py``.

    Returns:
        The dotted import path, e.g. ``tests.models.bert.test_modeling_bert``.

    Raises:
        ValueError: if the path is not under ``tests/models`` or is not a
            ``test_modeling_*.py`` python file.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."""
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to ``test_file``."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in ``test_file``, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all model test classes in ``test_file`` (those with a non-empty ``all_model_classes``)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of all model classes covered by the test classes in ``test_file``."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate ``test_class`` (running ``setUp`` if any) and return its model-tester class, or None."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in ``test_file`` that cover ``model_class``, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes associated with ``model_class`` in ``test_file``."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Return a mapping from each test class in ``test_file`` to its model-tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    """Return a mapping from each model class in ``test_file`` to its test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Return a mapping from each model class in ``test_file`` to its model-tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes to their names so the result is JSON-serializable.

    Strings pass through, classes become ``__name__``, lists/tuples and dicts are
    converted element-wise; anything else is returned unchanged.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
713
"""Rayleigh quotient of a Hermitian matrix: https://en.wikipedia.org/wiki/Rayleigh_quotient"""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if ``matrix`` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix ``a`` and column vector ``v``.

    The result is a 1x1 ndarray for a column-vector ``v``.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Smoke-test the two helpers on small Hermitian matrices."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
635
0
"""SuperGLUE benchmark metric (accuracy / F1 / Matthews correlation per sub-task)."""

from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Return the fraction of ``preds`` equal to ``labels`` (element-wise, numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Return accuracy and F1 (with averaging mode ``fa_avg``) as a dict."""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC metrics: per-question exact match and macro-F1, plus answer-level F1.

    Args:
        ids_preds: list of dicts with an ``idx`` (paragraph/question ids) and a ``prediction``.
        labels: gold answer labels aligned with ``ids_preds``.

    Returns:
        ``{"exact_match": ..., "f1_m": ..., "f1_a": ...}``.
    """
    # Group (prediction, label) pairs by question.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """SuperGLUE metric: dispatches on ``self.config_name`` (the sub-task)."""

    def _info(self):
        # Validate the configuration name early so errors surface at load time.
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Return the ``datasets.Features`` schema matching the configured sub-task."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Dispatch metric computation to the right helper for the configured sub-task."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            # Reshape into the format expected by the official ReCoRD evaluator.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )


# Backward-compatible alias for the previous (mangled) class name.
_snake_case = SuperGlue
714
"""Solve for the missing one of the three carrier concentrations of a semiconductor."""
from __future__ import annotations


def lowerCamelCase_(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Given exactly two of (electron, hole, intrinsic) concentrations — the third
    passed as 0 — return ``(name_of_missing_value, computed_value)`` using the
    mass-action law ``n_i**2 = n * p``.

    Raises:
        ValueError: if not exactly one value is 0, or any value is negative.
    """
    # Exactly one of the three values must be the unknown (passed as 0).
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    # Physical concentrations are non-negative; reject bad input early.
    if electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    if hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    if intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")

    # Solve n_i**2 = n * p for whichever quantity was left at 0.
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    if intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    return (-1, -1)  # unreachable given the checks above; kept for parity


if __name__ == "__main__":
    import doctest

    doctest.testmod()
635
0
"""ElGamal key generator: writes a key pair to ``<name>_pubkey.txt`` / ``<name>_privkey.txt``."""
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate considered when searching for a primitive root.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a randomly chosen (probable) primitive root modulo the prime ``p_val``.

    NOTE(review): the two modular-power filters below are a weak heuristic, not a
    full primitive-root test — kept as in the original implementation.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns:
        ``(public_key, private_key)`` with ``public_key = (key_size, e_1, e_2, p)``
        and ``private_key = (key_size, d)``.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write a fresh key pair to ``<name>_pubkey.txt`` and ``<name>_privkey.txt``.

    Exits (without overwriting) if either file already exists.
    """
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print("\nWARNING:")
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program."""
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", "w") as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")

    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", "w") as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")


def main() -> None:
    """Generate the default 2048-bit key pair named ``elgamal``."""
    print("Making key files...")
    make_key_files("elgamal", 2_048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
715
"""Accelerate example: k-fold cross-validation fine-tuning of BERT on GLUE MRPC."""
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): constant names restored from the upstream accelerate example —
# the mangled source had collapsed both to the same name.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: "Accelerator", dataset: "DatasetDict", train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build train/validation/test dataloaders for one cross-validation fold.

    Args:
        accelerator: the `Accelerator` (used for distributed-aware tokenization and padding).
        dataset: the full GLUE MRPC `DatasetDict`.
        train_idxs / valid_idxs: row indices of this fold's train / validation split.
        batch_size: per-device train batch size.

    Returns:
        ``(train_dataloader, eval_dataloader, test_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    """Run k-fold cross-validated training and report fold-averaged test metrics."""
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    """Parse CLI flags and launch the cross-validated training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
635
0
"""simple docstring""" from math import factorial lowerCAmelCase_ = {str(d): factorial(d) for d in range(10)} def lowerCamelCase_( __SCREAMING_SNAKE_CASE )-> int: return sum(DIGIT_FACTORIAL[d] for d in str(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase_( )-> int: _SCREAMING_SNAKE_CASE : Optional[Any] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , __SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(__SCREAMING_SNAKE_CASE ) == i ) if __name__ == "__main__": print(F"{solution() = }")
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? 
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , 
__SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() 
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , 
required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) 
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = [[1, 2, 4], [1, 2, 3, 4]] _SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveConstraint(_A) self.assertTrue(isinstance(dc.token_ids , _A)) with self.assertRaises(_A): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(_A): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A): DisjunctiveConstraint(_A) # fails here def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 2, 3], [1, 2, 4]] _SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A) _SCREAMING_SNAKE_CASE : Any = dc.update(1) _SCREAMING_SNAKE_CASE : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) _SCREAMING_SNAKE_CASE : List[str] = dc.update(2) _SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is False and reset is False self.assertTrue(_A) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) _SCREAMING_SNAKE_CASE : Dict = dc.update(3) _SCREAMING_SNAKE_CASE : List[Any] = stepped is True and completed is True and reset is False self.assertTrue(_A) self.assertTrue(dc.completed) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 3]) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] _SCREAMING_SNAKE_CASE : Dict = DisjunctiveConstraint(_A) _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) _SCREAMING_SNAKE_CASE : Optional[Any] = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) _SCREAMING_SNAKE_CASE : int = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() _SCREAMING_SNAKE_CASE : List[str] = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) _SCREAMING_SNAKE_CASE : Union[str, Any] = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with 
open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _snake_case : """simple docstring""" def __init__( self : Union[str, Any] , _A : Optional[int] , _A : Tuple=1_3 , _A : Tuple=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : List[Any]=True , _A : int=True , _A : List[str]=9_9 , _A : List[str]=3_2 , _A : Optional[int]=2 , _A : Any=4 , _A : List[Any]=3_7 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : Any=0.1 , _A : List[str]=5_1_2 , _A : int=1_6 , _A : Optional[Any]=2 , _A : Dict=0.02 , _A : List[Any]=3 , _A : List[str]=4 , _A : Optional[Any]=None , _A : List[Any]=1_0_0_0 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = parent _SCREAMING_SNAKE_CASE : Any = batch_size _SCREAMING_SNAKE_CASE : Tuple = seq_length _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : List[Any] = use_input_mask _SCREAMING_SNAKE_CASE : Dict = use_token_type_ids _SCREAMING_SNAKE_CASE : List[str] = use_labels _SCREAMING_SNAKE_CASE : List[Any] = vocab_size _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Dict = num_hidden_layers _SCREAMING_SNAKE_CASE : int = num_attention_heads _SCREAMING_SNAKE_CASE : int = intermediate_size _SCREAMING_SNAKE_CASE : Optional[int] = hidden_act _SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Any = 
attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size _SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size _SCREAMING_SNAKE_CASE : List[str] = initializer_range _SCREAMING_SNAKE_CASE : List[str] = num_labels _SCREAMING_SNAKE_CASE : Optional[int] = num_choices _SCREAMING_SNAKE_CASE : Union[str, Any] = scope _SCREAMING_SNAKE_CASE : Any = range_bbox def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) # convert bbox to numpy since TF does not support item assignment _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _SCREAMING_SNAKE_CASE : List[Any] = bbox[i, j, 3] _SCREAMING_SNAKE_CASE : Dict = bbox[i, j, 1] _SCREAMING_SNAKE_CASE : Any = t if bbox[i, j, 2] < bbox[i, j, 0]: _SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2] _SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 0] _SCREAMING_SNAKE_CASE : Optional[Any] = t _SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(_A) _SCREAMING_SNAKE_CASE : str = None if self.use_input_mask: _SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _SCREAMING_SNAKE_CASE : Any = 
ids_tensor([self.batch_size] , self.num_choices) _SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : Any , _A : Optional[int] , _A : List[str] , _A : Optional[int] , _A : str , _A : Optional[int] , _A : Dict , _A : int , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = TFLayoutLMModel(config=_A) _SCREAMING_SNAKE_CASE : Optional[int] = model(_A , _A , attention_mask=_A , token_type_ids=_A) _SCREAMING_SNAKE_CASE : List[Any] = model(_A , _A , token_type_ids=_A) _SCREAMING_SNAKE_CASE : Dict = model(_A , _A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _lowerCAmelCase ( self : Optional[int] , _A : Optional[int] , _A : str , _A : Tuple , _A : Optional[int] , _A : Dict , _A : List[Any] , _A : Tuple , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = TFLayoutLMForMaskedLM(config=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowerCAmelCase ( self : Dict , _A : Dict , _A : Dict , _A : Dict , _A : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any]): 
"""simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels _SCREAMING_SNAKE_CASE : Optional[Any] = TFLayoutLMForSequenceClassification(config=_A) _SCREAMING_SNAKE_CASE : List[str] = model(_A , _A , attention_mask=_A , token_type_ids=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCAmelCase ( self : Dict , _A : List[str] , _A : Dict , _A : str , _A : str , _A : Optional[int] , _A : Dict , _A : Optional[Any] , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.num_labels _SCREAMING_SNAKE_CASE : Dict = TFLayoutLMForTokenClassification(config=_A) _SCREAMING_SNAKE_CASE : List[str] = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowerCAmelCase ( self : Optional[Any] , _A : List[str] , _A : Any , _A : List[Any] , _A : List[Any] , _A : Any , _A : Any , _A : Optional[int] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = TFLayoutLMForQuestionAnswering(config=_A) _SCREAMING_SNAKE_CASE : Tuple = model(_A , _A , attention_mask=_A , token_type_ids=_A) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() ( _SCREAMING_SNAKE_CASE ) : List[str] = config_and_inputs _SCREAMING_SNAKE_CASE : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, 
) if is_tf_available() else () ) a = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) a = False a = True a = 10 def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = TFLayoutLMModelTester(self) _SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_A , hidden_size=3_7) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A) @slow def _lowerCAmelCase ( self : Any): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Dict = TFLayoutLMModel.from_pretrained(_A) self.assertIsNotNone(_A) @unittest.skip("""Onnx compliancy broke with TF 2.10""") def _lowerCAmelCase ( self : Any): """simple 
docstring""" pass def lowerCamelCase_()-> Union[str, Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""") _SCREAMING_SNAKE_CASE : List[str] = prepare_layoutlm_batch_inputs() # forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A) # test the sequence output on [0, :3, :3] _SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-3)) # test the pooled output on [1, :3] _SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552]) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _A , atol=1e-3)) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2) _SCREAMING_SNAKE_CASE : str = prepare_layoutlm_batch_inputs() # forward pass _SCREAMING_SNAKE_CASE : List[str] = model( input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=tf.convert_to_tensor([1, 1]) , ) # test whether we get a loss as a scalar _SCREAMING_SNAKE_CASE : str = outputs.loss _SCREAMING_SNAKE_CASE : Dict = (2,) self.assertEqual(loss.shape , _A) # test the shape of the logits _SCREAMING_SNAKE_CASE : Any = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = (2, 2) 
self.assertEqual(logits.shape , _A) @slow def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=1_3) _SCREAMING_SNAKE_CASE : Tuple = prepare_layoutlm_batch_inputs() # forward pass _SCREAMING_SNAKE_CASE : int = model( input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A) # test the shape of the logits _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3)) self.assertEqual(logits.shape , _A) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""") _SCREAMING_SNAKE_CASE : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _SCREAMING_SNAKE_CASE : List[str] = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A) # test the shape of the logits _SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor((2, 2_5)) self.assertEqual(outputs.start_logits.shape , _A) self.assertEqual(outputs.end_logits.shape , _A)
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... _SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', 
default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
0
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" 
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
0
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class _snake_case ( nn.Module ): """simple docstring""" def __init__( self : Dict): """simple docstring""" super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Linear(3 , 4) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.BatchNormad(4) _SCREAMING_SNAKE_CASE : List[str] = nn.Linear(4 , 5) def _lowerCAmelCase ( self : int , _A : Optional[Any]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(_A))) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , model.state_dict()) _SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_A , """index.json""") self.assertTrue(os.path.isfile(_A)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: _SCREAMING_SNAKE_CASE : int = os.path.join(_A , f"""{key}.dat""") self.assertTrue(os.path.isfile(_A)) # TODO: add tests on the fact weights are properly loaded def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(2 , 3 , dtype=_A) with TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE : Tuple = offload_weight(_A , """weight""" , _A , {}) _SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_A , """weight.dat""") self.assertTrue(os.path.isfile(_A)) self.assertDictEqual(_A , {"""weight""": {"""shape""": [2, 3], """dtype""": str(_A).split(""".""")[1]}}) _SCREAMING_SNAKE_CASE : Optional[Any] = load_offloaded_weight(_A , index["""weight"""]) 
self.assertTrue(torch.equal(_A , _A)) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = ModelForTest() _SCREAMING_SNAKE_CASE : Optional[Any] = model.state_dict() _SCREAMING_SNAKE_CASE : Any = {k: v for k, v in state_dict.items() if """linear2""" not in k} _SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) _SCREAMING_SNAKE_CASE : Dict = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) _SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in state_dict.items() if """weight""" in k} _SCREAMING_SNAKE_CASE : Optional[Any] = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) _SCREAMING_SNAKE_CASE : Tuple = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) # Duplicates are removed _SCREAMING_SNAKE_CASE : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2} _SCREAMING_SNAKE_CASE : List[Any] = extract_submodules_state_dict(_A , ["""a.1""", """a.2"""]) self.assertDictEqual(_A , {"""a.1""": 0, """a.2""": 2}) _SCREAMING_SNAKE_CASE : int = {"""a.1.a""": 0, 
"""a.10.a""": 1, """a.2.a""": 2} _SCREAMING_SNAKE_CASE : str = extract_submodules_state_dict(_A , ["""a.1""", """a.2"""]) self.assertDictEqual(_A , {"""a.1.a""": 0, """a.2.a""": 2})
721
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , 
prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is 
inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , 
help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: 
_SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) 
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : Optional[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : List[str] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : str = do_rescale SCREAMING_SNAKE_CASE__ : Any = rescale_factor SCREAMING_SNAKE_CASE__ : List[Any] = do_pad SCREAMING_SNAKE_CASE__ : int = pad_size def __lowercase( self : List[Any] , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Tuple )-> np.ndarray: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : List[str] , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_image_size(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE__ : int = (old_width // size + 1) * size - old_width return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ ) def __lowercase( self : List[str] , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Any , )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale 
SCREAMING_SNAKE_CASE__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : Tuple = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE__ : List[Any] = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_pad: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.pad(a_ , size=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ )
636
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _a ( lowercase__ : Any ): '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _a ( lowercase__ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _a ( lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [state.process_index] SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}''' def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _a ( lowercase__ : int ): '''simple docstring''' if 
state.is_main_process: SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device ) else: SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device ) SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' if state.num_processes != 2: return SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}''' def _a ( lowercase__ : int ): '''simple docstring''' if state.num_processes != 2: return SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}''' def _a ( lowercase__ : int ): '''simple docstring''' main() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = PartialState() state.print(f'''State: {state}''' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
636
1
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : int = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = GPTSwaTokenizer lowercase_ = False lowercase_ = True lowercase_ = False def __lowercase( self : int )-> Any: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Dict = GPTSwaTokenizer(a_ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Any , a_ : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'This is a test' SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'This is a test' return input_text, output_text def __lowercase( self : List[str] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = '<s>' SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Dict )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(a_ ) , 2000 ) def __lowercase( self : Tuple )-> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def __lowercase( self : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = GPTSwaTokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(a_ , ['▁This', 
'▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [465, 287, 265, 631, 842] ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) # fmt: off self.assertListEqual( a_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_ids_to_tokens(a_ ) # fmt: off self.assertListEqual( a_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] ) # fmt: on def __lowercase( self : Any )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = GPTSwaTokenizer(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = ['This is a test', 'I was born in 92000, and this is falsé.'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(a_ , a_ ): self.assertListEqual(tokenizer.encode_fast(a_ ) , a_ ) # Test that decode_fast returns the input text for text, token_ids in zip(a_ , a_ ): self.assertEqual(tokenizer.decode_fast(a_ ) , a_ ) @slow def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [ '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')', 'Hey there, how are you doing this fine day?', 'This is a text with a trailing spaces followed by a dot .', 'Häj sväjs lillebrör! =)', 'Det är inget fel på Mr. 
Cool', ] # fmt: off SCREAMING_SNAKE_CASE__ : Any = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=a_ , )
636
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE__ : Any = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : Any = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size SCREAMING_SNAKE_CASE__ : List[str] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : List[Any] = size SCREAMING_SNAKE_CASE__ : Tuple = do_normalize SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096] SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16} def __lowercase( self : Optional[Any] )-> str: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowercase( self : Dict )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' ) return raw_image 
@unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self ) @property def __lowercase( self : Dict )-> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE__ : List[Any] = 2048 SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __lowercase( self : Any )-> Tuple: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: 
# Test not batched input SCREAMING_SNAKE_CASE__ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : Any )-> Any: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 SCREAMING_SNAKE_CASE__ : int = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(a_ ): SCREAMING_SNAKE_CASE__ : Dict = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello' SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any = image_processor( a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : 
Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : str = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : int = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( a_ , 
return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 ) SCREAMING_SNAKE_CASE__ : Dict = 3 @property def __lowercase( self : Any )-> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched 
SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
636
1
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class snake_case ( UpperCamelCase_ ): lowercase_ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
636
import heapq as hq import math from collections.abc import Iterator class snake_case : def __init__( self : str , a_ : str )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = str(id_ ) SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance} def __lt__( self : int , a_ : Tuple )-> Union[str, Any]: """simple docstring""" return self.key < other.key def __repr__( self : Any )-> Dict: """simple docstring""" return self.id def __lowercase( self : Optional[Any] , a_ : int )-> List[str]: """simple docstring""" self.neighbors.append(a_ ) def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = weight def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ): '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , lowercase__ ) graph[b - 1].add_edge(graph[a - 1] , lowercase__ ) def _a ( lowercase__ : list , lowercase__ : Vertex ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = [] for u in graph: SCREAMING_SNAKE_CASE__ : Dict = math.inf SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : int = graph[:] while q: SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ ) q.remove(lowercase__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE__ : int = u SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id] for i in range(1 , len(lowercase__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _a ( lowercase__ : list , lowercase__ : Vertex ): '''simple docstring''' for u in graph: SCREAMING_SNAKE_CASE__ : List[str] = math.inf SCREAMING_SNAKE_CASE__ : int = None SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ ) hq.heapify(lowercase__ ) while h: SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE__ : List[str] = u SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id] hq.heapify(lowercase__ ) for i in range(1 , len(lowercase__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _a ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
636
1
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) def _a ( lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = r'\w+[.]\d+' SCREAMING_SNAKE_CASE__ : Dict = re.findall(lowercase__ , lowercase__ ) for pat in pats: SCREAMING_SNAKE_CASE__ : int = key.replace(lowercase__ , '_'.join(pat.split('.' ) ) ) return key def _a ( lowercase__ : List[str] , lowercase__ : int , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): SCREAMING_SNAKE_CASE__ : int = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: SCREAMING_SNAKE_CASE__ : List[Any] = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: SCREAMING_SNAKE_CASE__ : Optional[int] = pt_tuple_key[:-1] + ('embedding',) return renamed_pt_tuple_key, pt_tensor # conv layer SCREAMING_SNAKE_CASE__ : List[Any] = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: SCREAMING_SNAKE_CASE__ : Optional[int] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer SCREAMING_SNAKE_CASE__ : int = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight": SCREAMING_SNAKE_CASE__ : Dict = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight SCREAMING_SNAKE_CASE__ : Optional[int] = pt_tuple_key[:-1] + 
('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias SCREAMING_SNAKE_CASE__ : Dict = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _a ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str]=42 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params SCREAMING_SNAKE_CASE__ : int = flax_model.init_weights(PRNGKey(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Any = flatten_dict(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): SCREAMING_SNAKE_CASE__ : Any = rename_key(lowercase__ ) SCREAMING_SNAKE_CASE__ : Dict = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = rename_key_and_reshape_tensor(lowercase__ , lowercase__ , lowercase__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown SCREAMING_SNAKE_CASE__ : Dict = jnp.asarray(lowercase__ ) return unflatten_dict(lowercase__ )
636
def _a ( lowercase__ : int , lowercase__ : int ): '''simple docstring''' return int((input_a, input_a).count(0 ) != 0 ) def _a ( ): '''simple docstring''' assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
636
1
import baseaa def _a ( lowercase__ : str ): '''simple docstring''' return baseaa.baaencode(string.encode('utf-8' ) ) def _a ( lowercase__ : bytes ): '''simple docstring''' return baseaa.baadecode(lowercase__ ).decode('utf-8' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = "Hello World!" SCREAMING_SNAKE_CASE__ : Optional[int] = baseaa_encode(test) print(encoded) SCREAMING_SNAKE_CASE__ : Union[str, Any] = baseaa_decode(encoded) print(decoded)
636
from math import factorial, radians def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = -1 for _ in range(lowercase__ ): result += (b * (angle_in_radians**a)) / factorial(lowercase__ ) SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowercase__ , lowercase__ ) if __name__ == "__main__": __import__("doctest").testmod()
636
1
from abc import ABC, abstractmethod from argparse import ArgumentParser class snake_case ( UpperCamelCase_ ): @staticmethod @abstractmethod def __lowercase( a_ : ArgumentParser )-> Optional[Any]: """simple docstring""" raise NotImplementedError() @abstractmethod def __lowercase( self : int )-> Optional[int]: """simple docstring""" raise NotImplementedError()
636
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime.

    Uses trial division by odd numbers up to the integer square root.

    Args:
        number: non-negative integer to test.

    Returns:
        True when ``number`` is prime, False otherwise.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:  # 2 and 3 are primes
        return True
    if number < 2 or number % 2 == 0:  # 0, 1 and even numbers are not primes
        return False
    # math.isqrt is exact for large ints (no float rounding), so the upper
    # bound of the trial-division range is always correct.
    odd_numbers = range(3, math.isqrt(number) + 1, 2)
    return all(number % i != 0 for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime reached from ``factor * value``.

    Scans upward (or downward when ``desc=True`` is passed) from
    ``factor * value`` until a prime is found. If the starting value is
    itself prime, the search restarts one past it so the result is always
    a *different* prime.

    Args:
        value: starting integer.
        factor: multiplier applied to ``value`` before searching.
        **kwargs: ``desc=True`` searches downward instead of upward.

    Returns:
        The first prime encountered.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # The start was already prime: advance so we return a strictly new prime.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
636
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class snake_case ( UpperCamelCase_ ): lowercase_ = 'char' lowercase_ = 'bpe' lowercase_ = 'wp' SCREAMING_SNAKE_CASE__ : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'char_tokenizer'] lowercase_ = 'ViTImageProcessor' lowercase_ = 'MgpstrTokenizer' def __init__( self : List[str] , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , **a_ : List[str] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a_ , ) SCREAMING_SNAKE_CASE__ : int = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE__ : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(a_ , a_ ) def __call__( self : Tuple , a_ : int=None , a_ : Any=None , a_ : Any=None , **a_ : Any )-> Optional[Any]: """simple docstring""" if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' 
) if images is not None: SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.char_tokenizer(a_ , return_tensors=a_ , **a_ ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE__ : Optional[Any] = encodings['input_ids'] return inputs def __lowercase( self : Union[str, Any] , a_ : List[str] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = sequences SCREAMING_SNAKE_CASE__ : Tuple = char_preds.size(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._decode_helper(a_ , 'char' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self._decode_helper(a_ , 'bpe' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._decode_helper(a_ , 'wp' ) SCREAMING_SNAKE_CASE__ : str = [] SCREAMING_SNAKE_CASE__ : Dict = [] for i in range(a_ ): SCREAMING_SNAKE_CASE__ : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE__ : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE__ : Optional[Any] = scores.index(max(a_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} SCREAMING_SNAKE_CASE__ : Optional[Any] = final_strs SCREAMING_SNAKE_CASE__ : Optional[Any] = final_scores SCREAMING_SNAKE_CASE__ : Tuple = char_strs SCREAMING_SNAKE_CASE__ : Optional[int] = bpe_strs SCREAMING_SNAKE_CASE__ : Dict = wp_strs return out def __lowercase( self : List[Any] , a_ : Optional[int] , a_ : Any )-> Any: """simple docstring""" if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.char_decode SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Any = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE__ : Any = self.bpe_decode SCREAMING_SNAKE_CASE__ : Dict 
= 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = '#' elif format == DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE__ : int = self.wp_decode SCREAMING_SNAKE_CASE__ : Any = 102 SCREAMING_SNAKE_CASE__ : Optional[Any] = '[SEP]' else: raise ValueError(F'''Format {format} is not supported.''' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = [], [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE__ : int = pred_logits.size(1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=a_ , sorted=a_ ) SCREAMING_SNAKE_CASE__ : Dict = preds_index.view(-1 , a_ )[:, 1:] SCREAMING_SNAKE_CASE__ : Optional[int] = decoder(a_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = torch.nn.functional.softmax(a_ , dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE__ : str = preds_max_prob[:, 1:] for index in range(a_ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = preds_str[index].find(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE__ : Tuple = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE__ : List[Any] = pred_index.index(a_ ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE__ : Optional[Any] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE__ : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(a_ ) conf_scores.append(a_ ) return dec_strs, conf_scores def __lowercase( self : Dict , a_ : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a_ )] return decode_strs def __lowercase( self : List[Any] , a_ : List[Any] )-> str: """simple docstring""" return self.bpe_tokenizer.batch_decode(a_ ) def __lowercase( self : Optional[Any] , a_ : Union[str, Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = [seq.replace(' ' , '' ) for seq in 
self.wp_tokenizer.batch_decode(a_ )] return decode_strs
636
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : 
Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = scope SCREAMING_SNAKE_CASE__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2 def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : 
List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = model(a_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, 
inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" pass def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return 
SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), *get_values(a_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): SCREAMING_SNAKE_CASE__ : int = problem_type['title'] SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE__ : str = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that 
is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : int )-> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
636
1
# Boilerplate cell injected at the top of generated documentation notebooks:
# installs the library (from the last release, or optionally from source)
# before any of the documentation code runs.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# Cells prepended to every converted doc notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder -> stub-class substitutions so code samples in the docs stay
# syntactically valid without referencing real model/processor classes.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
636
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Dict = batch_size SCREAMING_SNAKE_CASE__ : Dict = seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = num_labels SCREAMING_SNAKE_CASE__ : Dict = num_choices SCREAMING_SNAKE_CASE__ : str = scope def __lowercase( self : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase( self : Dict )-> Tuple: """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , 
initializer_range=self.initializer_range , ) def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ ) model.to(a_ ) model.eval() # create attention mask SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 # first forward pass SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens # append to next input_ids and attn_mask SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Dict = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , ) # get two different outputs SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state'] SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval() SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ ) # first forward pass SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) 
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state'] SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[ 'last_hidden_state' ] # select random slice SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ ) model.to(a_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.num_labels SCREAMING_SNAKE_CASE__ : str = 
BioGptForTokenClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase( self : Any )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowercase_ = (BioGptForCausalLM,) if is_torch_available() else () lowercase_ = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def __lowercase( self : Tuple )-> int: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : Union[str, Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] 
= self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ : List[str] = type self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : int )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ ) def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*a_ ) def __lowercase( self : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*a_ ) @slow def __lowercase( self : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : List[str] = 'left' # Define PAD Token = EOS Token = 50256 SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id # use different length sentences to test batching SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' 
, padding=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = model.generate( input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(a_ , a_ ) self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] = 3 SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : int = 
BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : str = 3 SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification' SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids'] SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class snake_case ( unittest.TestCase ): @slow def __lowercase( self : Union[str, Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0] SCREAMING_SNAKE_CASE__ : List[str] = 4_2384 SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , a_ ) SCREAMING_SNAKE_CASE__ : int = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) ) @slow def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : Dict = 
BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( **a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , ) SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(a_ , a_ )
636
1
from math import factorial, radians def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = -1 for _ in range(lowercase__ ): result += (b * (angle_in_radians**a)) / factorial(lowercase__ ) SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowercase__ , lowercase__ ) if __name__ == "__main__": __import__("doctest").testmod()
636
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random() def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ): '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = parent SCREAMING_SNAKE_CASE__ : List[Any] = batch_size SCREAMING_SNAKE_CASE__ : str = min_seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ : int = feature_size SCREAMING_SNAKE_CASE__ : str = padding_value SCREAMING_SNAKE_CASE__ : Any = sampling_rate SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize SCREAMING_SNAKE_CASE__ : int = num_mel_bins SCREAMING_SNAKE_CASE__ : int = hop_length SCREAMING_SNAKE_CASE__ : str = win_length SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function SCREAMING_SNAKE_CASE__ : List[str] 
= fmin SCREAMING_SNAKE_CASE__ : Dict = fmax SCREAMING_SNAKE_CASE__ : int = mel_floor SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask def __lowercase( self : Dict )-> Dict: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]: """simple docstring""" def _flatten(a_ : int ): return list(itertools.chain(*a_ ) ) if equal_length: SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]: """simple docstring""" if equal_length: SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Tuple = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = SpeechTaFeatureExtractor def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 
SpeechTaFeatureExtractionTester(self ) def __lowercase( self : Any , a_ : Optional[int] )-> List[str]: """simple docstring""" self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) ) def __lowercase( self : Tuple )-> Dict: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] 
) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 ) SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : int )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __lowercase( self : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] 
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : str = feat_extract( a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __lowercase( self : Any )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : List[str] = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : Dict )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: 
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : Tuple )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack! 
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ ) def __lowercase( self : str )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : str = min(a_ ) SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : int = feat_extract.pad( a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __lowercase( self : Optional[int] , a_ : List[str] )-> Any: """simple docstring""" from datasets import load_dataset SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def __lowercase( self : List[str] )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor( [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 
1.6_1_7_4e-0_3, 3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3, 2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4, 4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3, 7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4, 4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] ) # fmt: on SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 9_3680) ) self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) ) def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
636
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : int )-> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : Optional[int] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = '</s>' SCREAMING_SNAKE_CASE__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Dict )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(a_ ) , 1103 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" 
self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.' 
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example'] SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. 
@slow def __lowercase( self : Any )-> str: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> List[str]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : Tuple )-> str: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) @require_torch def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example'] SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids self.assertListEqual( a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
636
import math import sys def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '' try: with open(lowercase__ , 'rb' ) as binary_file: SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read() for dat in data: SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'} SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', '' SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ ) for i in range(len(lowercase__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string] result += last_match_id SCREAMING_SNAKE_CASE__ : str = last_match_id + '0' if math.loga(lowercase__ ).is_integer(): SCREAMING_SNAKE_CASE__ : List[str] = {} for curr_key in list(lowercase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1' index += 1 SCREAMING_SNAKE_CASE__ : Tuple = '' return result def _a ( lowercase__ : str , lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = 8 try: with open(lowercase__ , 'wb' ) as opened_file: SCREAMING_SNAKE_CASE__ : Dict = [ to_write[i : i + byte_length] for i in range(0 , len(lowercase__ ) , lowercase__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 0 for letter in data_bits: if letter == "1": break counter += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 
data_bits[counter:] SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :] return data_bits def _a ( lowercase__ : str , lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ ) SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ ) write_file_binary(lowercase__ , lowercase__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
636
1
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"

_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"

_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute (keep, delete, add) n-gram scores for one n-gram order.

    sgrams/cgrams: n-grams of the source / candidate sentence.
    rgramslist: list of n-gram lists, one per reference; numref = len(rgramslist).
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """SARI score for one (source, candidate, references) triple, averaged over 1-4 grams."""
    numref = len(rsents)

    # 1- to 4-gram lists for the source (s) and candidate (c) sentences.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize and tokenize *sentence* the way sacrebleu does before scoring.

    NOTE(review): the moses/penn branches assume sacremoses defaults
    return_str=True, escape=False — confirm against the pinned sacremoses version.
    """
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu 2.x moved the tokenizer registry.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100) averaged over all examples."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; references are transposed to its expected layout."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """WikiSplit metric: SARI + sacrebleu + exact match.

    Method names _info/_compute are required by the datasets.Metric API.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
636
def _a ( lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} ) SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'} for i in range(len(lowercase__ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(lowercase__ ) == 0 def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' ) if is_balanced(lowercase__ ): print(lowercase__ , 'is balanced' ) else: print(lowercase__ , 'is not balanced' ) if __name__ == "__main__": main()
636
1
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class snake_case : def __init__( self : Optional[Any] , a_ : List[str] , a_ : List[Any]=13 , a_ : Any=7 , a_ : List[Any]=True , a_ : Optional[int]=True , a_ : Any=True , a_ : List[str]=True , a_ : Dict=99 , a_ : int=64 , a_ : Dict=32 , a_ : Optional[Any]=5 , a_ : Dict=4 , a_ : Any=37 , a_ : str="gelu" , a_ : List[Any]=0.1 , a_ : Tuple=0.1 , a_ : List[Any]=512 , a_ : List[str]=16 , a_ : Union[str, Any]=2 , a_ : List[str]=0.02 , a_ : List[str]=3 , a_ : List[Any]=4 , a_ : Any=None , )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : str = seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = is_training SCREAMING_SNAKE_CASE__ : Dict = use_input_mask SCREAMING_SNAKE_CASE__ : int = use_token_type_ids SCREAMING_SNAKE_CASE__ : Tuple = use_labels SCREAMING_SNAKE_CASE__ : int = vocab_size SCREAMING_SNAKE_CASE__ : int = hidden_size SCREAMING_SNAKE_CASE__ : str = embedding_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ 
: List[str] = hidden_act SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings SCREAMING_SNAKE_CASE__ : str = type_vocab_size SCREAMING_SNAKE_CASE__ : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Dict = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE__ : Dict = num_choices SCREAMING_SNAKE_CASE__ : Optional[int] = scope def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Tuple = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase( self : Tuple )-> Tuple: """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , ) def __lowercase( self : int , a_ : List[Any] , a_ : List[Any] , a_ : str , a_ : Optional[Any] , a_ : Any , a_ : int , a_ : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = MegatronBertModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , token_type_ids=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , token_type_ids=a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowercase( self : str , a_ : Tuple , a_ : str , a_ : int , a_ : Any , a_ : str , a_ : Optional[int] , a_ : Union[str, Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MegatronBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : List[str] , a_ : Dict , a_ : List[str] , a_ : Dict , a_ : Optional[Any] , a_ : int , a_ : Tuple , a_ : List[str] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = MegatronBertForCausalLM(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : Optional[int] , a_ : Union[str, Any] , a_ : List[Any] , a_ : int , a_ : str , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Dict )-> str: """simple 
docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = MegatronBertForNextSentencePrediction(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowercase( self : Dict , a_ : List[Any] , a_ : Optional[Any] , a_ : List[str] , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Dict )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = MegatronBertForPreTraining(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , next_sentence_label=a_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowercase( self : str , a_ : Optional[int] , a_ : int , a_ : Any , a_ : str , a_ : Optional[int] , a_ : Dict , a_ : List[str] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = MegatronBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Any = model( a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase( self : Optional[Any] , a_ : List[str] , a_ : List[Any] , a_ : int , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE__ : str = MegatronBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : Tuple , a_ : List[Any] , a_ : Any , a_ : Optional[int] , a_ : Any , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Any = MegatronBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Dict , a_ : str , a_ : str , a_ : List[Any] , a_ : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.num_choices SCREAMING_SNAKE_CASE__ : Tuple = MegatronBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : str = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class 
snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = True # test_resize_embeddings = False lowercase_ = False def __lowercase( self : str , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str]=False )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class in get_values(a_ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a_ ) SCREAMING_SNAKE_CASE__ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = MegatronBertModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def __lowercase( self : Optional[int] )-> int: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*a_ ) def 
__lowercase( self : Union[str, Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*a_ ) def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*a_ ) def __lowercase( self : int )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*a_ ) def __lowercase( self : Tuple )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*a_ ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*a_ ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*a_ ) def __lowercase( self : Dict )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*a_ ) def _a ( lowercase__ : Union[str, Any] ): '''simple docstring''' return torch.tensor( lowercase__ , dtype=torch.long , device=lowercase__ , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): @slow @unittest.skip('Model is not available.' 
) def __lowercase( self : int )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(os.environ['MYDIR'] , a_ ) SCREAMING_SNAKE_CASE__ : Dict = MegatronBertModel.from_pretrained(a_ ) model.to(a_ ) model.half() SCREAMING_SNAKE_CASE__ : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )[0] SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , a_ ) SCREAMING_SNAKE_CASE__ : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): SCREAMING_SNAKE_CASE__ : List[str] = output[0, ii, jj] SCREAMING_SNAKE_CASE__ : List[Any] = expected[3 * ii + jj] SCREAMING_SNAKE_CASE__ : Dict = 'ii={} jj={} a={} b={}'.format(a_ , a_ , a_ , a_ ) self.assertTrue(math.isclose(a_ , a_ , rel_tol=a_ , abs_tol=a_ ) , msg=a_ )
636
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : int )-> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : Optional[int] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = '</s>' SCREAMING_SNAKE_CASE__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Dict )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(a_ ) , 1103 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" 
self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.' 
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example'] SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. 
@slow def __lowercase( self : Any )-> str: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> List[str]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : Tuple )-> str: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) @require_torch def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example'] SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids self.assertListEqual( a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
636
1
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def __lowercase( self : int , a_ : Any=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = np.random.RandomState(a_ ) SCREAMING_SNAKE_CASE__ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : str = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : Tuple )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] 
= self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : int = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) SCREAMING_SNAKE_CASE__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) SCREAMING_SNAKE_CASE__ : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = 
OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) SCREAMING_SNAKE_CASE__ : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : Dict )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) SCREAMING_SNAKE_CASE__ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : int = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : Tuple = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE__ : Optional[int] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs() 
SCREAMING_SNAKE_CASE__ : List[Any] = 3 * [inputs.pop('prompt' )] SCREAMING_SNAKE_CASE__ : List[Any] = pipe.tokenizer( a_ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors='np' , ) SCREAMING_SNAKE_CASE__ : Any = text_inputs['input_ids'] SCREAMING_SNAKE_CASE__ : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_embeds # forward SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : List[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE__ : Dict = negative_prompt SCREAMING_SNAKE_CASE__ : Dict = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : str = self.get_dummy_inputs() SCREAMING_SNAKE_CASE__ : int = 3 * [inputs.pop('prompt' )] SCREAMING_SNAKE_CASE__ : Dict = [] for p in [prompt, negative_prompt]: SCREAMING_SNAKE_CASE__ : Dict = pipe.tokenizer( a_ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors='np' , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_inputs['input_ids'] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = embeds # forward SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : int = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - 
image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class snake_case ( unittest.TestCase ): @property def __lowercase( self : Tuple )-> Tuple: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ort.SessionOptions() SCREAMING_SNAKE_CASE__ : int = False return options def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" # using the PNDM scheduler by default SCREAMING_SNAKE_CASE__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'A painting of a squirrel eating a burger' np.random.seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = output.images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Any = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = DDIMScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) SCREAMING_SNAKE_CASE__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a_ ) 
SCREAMING_SNAKE_CASE__ : Any = 'open neural network exchange' SCREAMING_SNAKE_CASE__ : List[str] = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : Any = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type='np' ) SCREAMING_SNAKE_CASE__ : Any = output.images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Any )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) SCREAMING_SNAKE_CASE__ : str = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = 'open neural network exchange' SCREAMING_SNAKE_CASE__ : str = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type='np' ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 def test_callback_fn(a_ : int , a_ : int , a_ : np.ndarray ) -> None: SCREAMING_SNAKE_CASE__ : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == 
(1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Optional[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : str = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Tuple = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Any = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = 'Andromeda galaxy in a bottle' SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.random.RandomState(0 ) pipe( prompt=a_ , num_inference_steps=5 , guidance_scale=7.5 , generator=a_ , callback=a_ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(a_ , a_ ) assert pipe.safety_checker is None SCREAMING_SNAKE_CASE__ : Any = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(a_ ) # sanity check that the pipeline still 
works assert pipe.safety_checker is None SCREAMING_SNAKE_CASE__ : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None
636
def _a ( lowercase__ : int = 1_00_00_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , lowercase__ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
636
1
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) SCREAMING_SNAKE_CASE__ : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case : lowercase_ = field( default=UpperCamelCase_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(UpperCamelCase_ )} ) lowercase_ = field( default=UpperCamelCase_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowercase_ = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowercase_ = field( default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowercase_ = field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowercase_ = field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' 
) } , ) lowercase_ = field( default=UpperCamelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowercase_ = field( default=UpperCamelCase_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowercase_ = field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase_ = field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase_ = field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowercase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class snake_case ( UpperCamelCase_ ): lowercase_ = 'train' lowercase_ = 'dev' class snake_case ( UpperCamelCase_ ): lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 def __init__( self : int , a_ : SquadDataTrainingArguments , a_ : PreTrainedTokenizer , a_ : Optional[int] = None , a_ : Union[str, Split] = Split.train , a_ : Optional[bool] = False , a_ : Optional[str] = None , a_ : Optional[str] = "pt" , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = args SCREAMING_SNAKE_CASE__ : List[Any] = is_language_sensitive SCREAMING_SNAKE_CASE__ : Any = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(a_ , a_ ): try: SCREAMING_SNAKE_CASE__ : Union[str, Any] = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) SCREAMING_SNAKE_CASE__ : Optional[int] = mode # Load data features from cache or dataset file SCREAMING_SNAKE_CASE__ : Any = 'v2' if args.version_2_with_negative else 'v1' SCREAMING_SNAKE_CASE__ : int = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # 
Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. SCREAMING_SNAKE_CASE__ : List[str] = cached_features_file + '.lock' with FileLock(a_ ): if os.path.exists(a_ ) and not args.overwrite_cache: SCREAMING_SNAKE_CASE__ : Any = time.time() SCREAMING_SNAKE_CASE__ : int = torch.load(a_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. SCREAMING_SNAKE_CASE__ : Tuple = self.old_features['features'] SCREAMING_SNAKE_CASE__ : Tuple = self.old_features.get('dataset' , a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.old_features.get('examples' , a_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: SCREAMING_SNAKE_CASE__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: SCREAMING_SNAKE_CASE__ : List[Any] = self.processor.get_train_examples(args.data_dir ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=a_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a_ , ) SCREAMING_SNAKE_CASE__ : List[Any] = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , a_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self : Union[str, Any] )-> Any: """simple docstring""" return len(self.features ) def __getitem__( self : List[Any] , a_ : str )-> Dict[str, torch.Tensor]: """simple docstring""" # Convert to Tensors and build dataset SCREAMING_SNAKE_CASE__ : int = self.features[i] SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(feature.input_ids , dtype=torch.long ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) SCREAMING_SNAKE_CASE__ : Any = torch.tensor(feature.token_type_ids , dtype=torch.long ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.cls_index , dtype=torch.long ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.p_mask , dtype=torch.float ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) SCREAMING_SNAKE_CASE__ : str = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
636
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', 
f'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), 
('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def _a ( lowercase__ : List[str] , lowercase__ : Dict ): '''simple docstring''' for i in range(config.num_hidden_layers ): SCREAMING_SNAKE_CASE__ : Dict = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE__ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :] def _a ( lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ ) SCREAMING_SNAKE_CASE__ : Any = val @torch.no_grad() def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : str = False if "vqa" in checkpoint_url: 
SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : str = 31_29 SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files' SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json' SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Dict = idalabel SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ ) elif "nlvr" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'} SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()} SCREAMING_SNAKE_CASE__ : Tuple = 3 SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ ) elif "irtr" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ ) elif "mlm_itm" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict'] SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_q_k_v(lowercase__ , lowercase__ ) if mlm_model or irtr_model: SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) # load state dict into HuggingFace model model.eval() if mlm_model: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = 
model.load_state_dict(lowercase__ , strict=lowercase__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowercase__ ) # Define processor SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 ) SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' ) SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ ) # Forward pass on example inputs (image + text) if nlvr_model: SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : List[Any] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw ) if mlm_model: SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].' else: SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?' 
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ ) # Verify outputs if mlm_model: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify masked token prediction equals "cats" SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify vqa prediction equals "2" SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, 
help="Path to the output PyTorch model directory." ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
636
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786, 1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791, 1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409, 3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361 ] SCREAMING_SNAKE_CASE__ : int = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793, 1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675, 2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865, 4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362 ] class snake_case ( UpperCamelCase_ ): lowercase_ = 'whisper' lowercase_ = ['past_key_values'] lowercase_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , a_ : int=5_1865 , a_ : 
Optional[int]=80 , a_ : Tuple=6 , a_ : Optional[Any]=4 , a_ : int=6 , a_ : int=4 , a_ : Tuple=1536 , a_ : Tuple=1536 , a_ : str=0.0 , a_ : int=0.0 , a_ : List[Any]=5_0257 , a_ : Any=True , a_ : str=True , a_ : Union[str, Any]="gelu" , a_ : Tuple=256 , a_ : Any=0.0 , a_ : str=0.0 , a_ : int=0.0 , a_ : List[str]=0.02 , a_ : str=False , a_ : List[str]=1500 , a_ : Tuple=448 , a_ : str=5_0256 , a_ : str=5_0256 , a_ : Dict=5_0256 , a_ : Optional[int]=None , a_ : List[Any]=[220, 5_0256] , a_ : List[Any]=False , a_ : Tuple=256 , a_ : Tuple=False , a_ : str=0.05 , a_ : int=10 , a_ : Union[str, Any]=2 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=10 , a_ : int=0 , a_ : Any=7 , **a_ : List[Any] , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : int = num_mel_bins SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layers SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = decoder_layers SCREAMING_SNAKE_CASE__ : Any = decoder_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = decoder_ffn_dim SCREAMING_SNAKE_CASE__ : Any = encoder_ffn_dim SCREAMING_SNAKE_CASE__ : Optional[Any] = dropout SCREAMING_SNAKE_CASE__ : List[Any] = attention_dropout SCREAMING_SNAKE_CASE__ : Tuple = activation_dropout SCREAMING_SNAKE_CASE__ : List[Any] = activation_function SCREAMING_SNAKE_CASE__ : Optional[int] = init_std SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE__ : Any = decoder_layerdrop SCREAMING_SNAKE_CASE__ : Tuple = use_cache SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_layers SCREAMING_SNAKE_CASE__ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE__ : Any = max_source_positions SCREAMING_SNAKE_CASE__ : Any = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. 
SCREAMING_SNAKE_CASE__ : Any = classifier_proj_size SCREAMING_SNAKE_CASE__ : str = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE__ : Any = apply_spec_augment SCREAMING_SNAKE_CASE__ : str = mask_time_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = mask_time_length SCREAMING_SNAKE_CASE__ : Tuple = mask_time_min_masks SCREAMING_SNAKE_CASE__ : List[Any] = mask_feature_prob SCREAMING_SNAKE_CASE__ : int = mask_feature_length SCREAMING_SNAKE_CASE__ : int = mask_feature_min_masks SCREAMING_SNAKE_CASE__ : List[Any] = median_filter_width super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , suppress_tokens=a_ , begin_suppress_tokens=a_ , **a_ , ) class snake_case ( UpperCamelCase_ ): @property def __lowercase( self : List[str] )-> Mapping[str, Mapping[int, str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: SCREAMING_SNAKE_CASE__ : Optional[int] = {0: 'batch'} else: SCREAMING_SNAKE_CASE__ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(a_ , direction='inputs' ) return common_inputs def __lowercase( self : int , a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional["TensorType"] = None , a_ : int = 2_2050 , a_ : float = 5.0 , a_ : int = 220 , )-> Mapping[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict() SCREAMING_SNAKE_CASE__ : int = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=a_ , framework=a_ , sampling_rate=a_ , time_duration=a_ , frequency=a_ , ) SCREAMING_SNAKE_CASE__ : str = encoder_inputs['input_features'].shape[2] SCREAMING_SNAKE_CASE__ : Any = encoder_sequence_length // 2 if 
self.use_past else seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().generate_dummy_inputs( preprocessor.tokenizer , a_ , a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_inputs.pop('input_features' ) SCREAMING_SNAKE_CASE__ : Tuple = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: SCREAMING_SNAKE_CASE__ : Any = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def __lowercase( self : int )-> float: """simple docstring""" return 1e-3
636
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case : lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def __lowercase( self : Dict )-> Tuple: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def __lowercase( self : Dict )-> Union[str, Any]: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def __lowercase( self : Tuple )-> torch.Tensor: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack( [ pixel_indices % self.width, torch.div(a_ , self.width , rounding_mode='trunc' ), ] , axis=1 , ) return coords @property def __lowercase( self : Any )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords() SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor: """simple docstring""" SCREAMING_SNAKE_CASE__ , 
*SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 ) SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution() SCREAMING_SNAKE_CASE__ : str = self.fov() SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1 SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 ) SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 ) SCREAMING_SNAKE_CASE__ : str = ( self.z.view(a_ , 1 , 3 ) + self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1] + self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:] ) SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.stack( [ torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(a_ , *a_ , 2 , 3 ) def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , ) def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) SCREAMING_SNAKE_CASE__ : Tuple = -z * 4 SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ ) origins.append(lowercase__ ) xs.append(lowercase__ ) ys.append(lowercase__ ) zs.append(lowercase__ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
636
1
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL  # soft dependency

if is_pytesseract_available():
    import pytesseract

# Keep the original (generated) module-level name for the logger binding.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale an absolute-pixel (x0, y0, x1, y1) box into the 0-1000 range LayoutLM expects."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on `image` and return (words, boxes-normalized-to-0-1000).

    Raises:
        AssertionError: if Tesseract returns a word count that does not match the box count.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes


# Preserve the final binding of the old generated name (it previously pointed at this function,
# shadowing the earlier def).
_a = apply_tesseract


class snake_case(BaseImageProcessor):
    """Image processor for LayoutLM-style models.

    Optionally resizes, rescales and normalizes images, and can run Tesseract OCR to
    extract words and their (normalized) bounding boxes.
    """

    # NOTE(review): presumably this was `model_input_names` before obfuscation — confirm.
    lowercase_ = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Standardize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of images; returns a `BatchFeature` with `pixel_values`
        (plus `words`/`boxes` when `apply_ocr` is enabled). Per-call arguments override
        the defaults set in `__init__`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes (runs on the raw images,
        # before any resize/rescale/normalize).
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
636
import requests SCREAMING_SNAKE_CASE__ : int = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(f'''{i}.) {article['title']}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
636
1
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid over a numpy array."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis (shift by the row max before exp)."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


# Preserve the final binding of the old generated name (the second def shadowed the first).
_a = softmax


class ClassificationFunction(ExplicitEnum):
    """Post-processing functions a text-classification pipeline can apply to logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class snake_case(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            # NOTE(review): the obfuscated source read `idalabel`; `id2label` is the config
            # attribute actually populated by the library — confirm.
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
636
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules a model executes during one forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Only record leaves (no submodules), plus convs/batchnorms explicitly.
        # NOTE(review): obfuscated source read `nn.Convad`/`nn.BatchNormad`;
        # Conv2d/BatchNorm2d are the only plausible originals — confirm.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights operation-by-operation from `src` to `dest` by tracing both."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet trunk so it exposes vissl-style feature blocks."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        # NOTE(review): original `out_feat_keys` value was erased by obfuscation;
        # None (all features) matches the upstream conversion script — confirm.
        return get_trunk_forward_outputs(x, out_feat_keys=None, feature_blocks=self._feature_blocks)


class NameToFromModelFuncMap(dict):
    """Maps a HF model name to a factory returning (source model, optional state dict);
    falls back to timm for names it does not contain."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            timm_name = self.convert_name_to_timm(x)
            val = partial(lambda name: (timm.create_model(name, pretrained=True).eval(), None), timm_name)
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps a HF model name to the HF model class to instantiate."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy head tensors from a vissl state dict into a HF state dict, key by key."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Transfer one source checkpoint into a HF RegNet, verify outputs match, optionally push."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE(review): the original raise_if_mismatch value was erased by obfuscation;
        # False matches the upstream conversion script — confirm.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head,
    # so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or all) RegNet checkpoints and optionally push them to the Hub."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        model.load_state_dict(model_state_dict["trunk"])
        return model.eval(), model_state_dict["heads"]

    # NOTE(review): the obfuscation erased the dict-key assignment targets below; the
    # name<->URL pairing (32gf<->320, 64gf<->640, 128gf<->1280, 10B<->10b) follows the URLs.
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        # NOTE(review): the original had a duplicate `w_a=` keyword (SyntaxError); the
        # RegNetParams signature takes w_0 before w_a — confirm against classy_vision.
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    SCREAMING_SNAKE_CASE__.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    SCREAMING_SNAKE_CASE__.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    SCREAMING_SNAKE_CASE__.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = SCREAMING_SNAKE_CASE__.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
636
1
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case : def __init__( self : Tuple , a_ : Optional[int] , a_ : int = 13 , a_ : int = 64 , a_ : int = 2 , a_ : int = 3 , a_ : int = 3 , a_ : bool = True , a_ : bool = True , a_ : int = 128 , a_ : Dict=[16, 32, 64, 128] , a_ : int = 7 , a_ : int = 4 , a_ : int = 37 , a_ : str = "gelu" , a_ : float = 0.1 , a_ : float = 0.1 , a_ : int = 10 , a_ : float = 0.02 , a_ : int = 2 , a_ : int = 1 , a_ : int = 128 , a_ : List[int] = [2, 2, 2, 2] , a_ : int = 2 , a_ : int = 2 , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE__ : List[str] = image_size SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : List[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE__ 
: Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Any = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = encoder_stride SCREAMING_SNAKE_CASE__ : Any = num_attention_outputs SCREAMING_SNAKE_CASE__ : Optional[Any] = embed_dim SCREAMING_SNAKE_CASE__ : Tuple = embed_dim + 1 SCREAMING_SNAKE_CASE__ : Optional[int] = resolution SCREAMING_SNAKE_CASE__ : Any = depths SCREAMING_SNAKE_CASE__ : List[Any] = hidden_sizes SCREAMING_SNAKE_CASE__ : Dict = dim SCREAMING_SNAKE_CASE__ : Optional[int] = mlp_expansion_ratio def __lowercase( self : int )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Any = self.get_config() return config, pixel_values, labels def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def __lowercase( self : Optional[int] , a_ : Union[str, Any] , a_ : str , a_ : Dict )-> Optional[Any]: """simple 
docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFEfficientFormerModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Dict = model(a_ , training=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : Optional[int] , a_ : List[str] , a_ : int , a_ : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Dict = TFEfficientFormerForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Tuple = TFEfficientFormerForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : Union[str, Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowercase_ = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def 
__lowercase( self : Dict )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = TFEfficientFormerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester( self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def __lowercase( self : int )-> List[Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" pass def __lowercase( self : Optional[int] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : int )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : Union[str, Any] , a_ : Dict , a_ : Dict ): SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a_ ) , a_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): SCREAMING_SNAKE_CASE__ : Any = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 
'chunk_length' ) and self.model_tester.chunk_length > 1: SCREAMING_SNAKE_CASE__ : Tuple = seq_length * self.model_tester.chunk_length else: SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: SCREAMING_SNAKE_CASE__ : Any = outputs.decoder_hidden_states self.asseretIsInstance(a_ , (list, tuple) ) self.assertEqual(len(a_ ) , a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(self.model_tester , 'seq_length' , a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , a_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Any = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Dict , a_ : Dict , a_ : List[Any] , a_ : Any=False )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def __lowercase( self : Tuple )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : Dict )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFEfficientFormerModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : List[Any] = getattr(self.model_tester , 'seq_length' , a_ ) SCREAMING_SNAKE_CASE__ : str = getattr(self.model_tester , 'encoder_seq_length' , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(self.model_tester , 'key_length' , a_ ) SCREAMING_SNAKE_CASE__ : Tuple = getattr(self.model_tester , 'chunk_length' , a_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = True SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = True SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = 
model_class(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes SCREAMING_SNAKE_CASE__ : Any = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } SCREAMING_SNAKE_CASE__ : Tuple = model(a_ ) self.assertTrue(outputs_dict is not None ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> Any: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def __lowercase( self : 
Dict )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img() SCREAMING_SNAKE_CASE__ : Tuple = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Dict = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow def __lowercase( self : Any )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img() SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Dict = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
636
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """
    Wraps an OwlViT image processor and a CLIP tokenizer into a single processor.

    Offers all the functionality of the image processor and the tokenizer; also
    provides the post-processing helpers of the image processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated name of `image_processor`; keep
        # accepting it (with a warning) for backward compatibility until v5.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Prepare text queries and/or images for the model.

        Args:
            text: a string, a list of strings, or a nested list of strings
                (one list of text queries per image). Nested lists are padded
                with " " so every image has the same number of queries.
            images: image(s) to be encoded by the image processor.
            query_images: query image(s) for image-guided detection.
            padding: tokenizer padding strategy (default "max_length").
            return_tensors: "np", "pt", "tf" or "jax".

        Returns:
            `BatchEncoding` with `input_ids`/`attention_mask` (when `text` is
            given), `query_pixel_values` (when `query_images` is given) and/or
            `pixel_values` (when `images` is given).

        Raises:
            ValueError: if none of `text`, `images`, `query_images` is given,
                or `return_tensors` names an unavailable framework.
            TypeError: if `text` has an unsupported structure.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.'
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                # A single string or a flat list of strings: one encoding.
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max(len(t) for t in text)

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            # Concatenate the per-image encodings into one batch, using the
            # framework requested by `return_tensors`.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
636
1
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html SCREAMING_SNAKE_CASE__ : Any = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class snake_case : lowercase_ = PegasusConfig lowercase_ = {} lowercase_ = 'gelu' def __init__( self : List[str] , a_ : Union[str, Any] , a_ : Dict=13 , a_ : List[Any]=7 , a_ : str=True , a_ : List[str]=False , a_ : List[Any]=99 , a_ : int=32 , a_ : Any=5 , a_ : str=4 , a_ : Tuple=37 , a_ : Optional[int]=0.1 , a_ : Dict=0.1 , a_ : List[Any]=20 , a_ : Any=2 , a_ : Optional[int]=1 , a_ : List[str]=0 , )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : List[Any] = batch_size SCREAMING_SNAKE_CASE__ : int = seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_training SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = intermediate_size SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = eos_token_id SCREAMING_SNAKE_CASE__ : int = pad_token_id SCREAMING_SNAKE_CASE__ : 
Union[str, Any] = bos_token_id def __lowercase( self : Dict )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) SCREAMING_SNAKE_CASE__ : Tuple = prepare_pegasus_inputs_dict(a_ , a_ , a_ ) return config, inputs_dict def __lowercase( self : List[str] , a_ : Tuple , a_ : Tuple , a_ : Union[str, Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 20 SCREAMING_SNAKE_CASE__ : Dict = model_class_name(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model.encode(inputs_dict['input_ids'] ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) SCREAMING_SNAKE_CASE__ : List[Any] = jnp.broadcast_to( 
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE__ : str = model.decode( decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) SCREAMING_SNAKE_CASE__ : Tuple = model.decode( decoder_input_ids[:, -1:] , a_ , decoder_attention_mask=a_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a_ , ) SCREAMING_SNAKE_CASE__ : int = model.decode(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def __lowercase( self : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 20 SCREAMING_SNAKE_CASE__ : List[str] = model_class_name(a_ ) SCREAMING_SNAKE_CASE__ : Dict = model.encode(inputs_dict['input_ids'] ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) SCREAMING_SNAKE_CASE__ : List[str] = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ ) SCREAMING_SNAKE_CASE__ : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE__ : List[Any] = model.decode( decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' 
) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , a_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a_ , decoder_position_ids=a_ , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.decode(a_ , a_ , decoder_attention_mask=a_ ) SCREAMING_SNAKE_CASE__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def _a ( lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : int=None , lowercase__ : Optional[int]=None , ): '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : str = np.not_equal(lowercase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : int = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowercase_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowercase_ = True lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = FlaxPegasusModelTester(self ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=a_ ) def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : List[str] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(a_ , a_ , a_ ) def __lowercase( self : int )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(a_ , a_ , a_ ) def __lowercase( self : Tuple )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : Tuple = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a_ ) @jax.jit def encode_jitted(a_ : Optional[int] , a_ : Tuple=None , **a_ : List[str] ): return model.encode(input_ids=a_ , attention_mask=a_ ) with self.subTest('JIT Enabled' ): SCREAMING_SNAKE_CASE__ : List[str] = encode_jitted(**a_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : Dict = encode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) ) for jitted_output, output in zip(a_ , a_ ): self.assertEqual(jitted_output.shape , output.shape ) def __lowercase( self : List[str] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) SCREAMING_SNAKE_CASE__ : List[str] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def 
decode_jitted(a_ : Dict , a_ : int , a_ : str ): return model.decode( decoder_input_ids=a_ , decoder_attention_mask=a_ , encoder_outputs=a_ , ) with self.subTest('JIT Enabled' ): SCREAMING_SNAKE_CASE__ : str = decode_jitted(**a_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = decode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) ) for jitted_output, output in zip(a_ , a_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowercase( self : Any )-> int: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = model_class_name.from_pretrained('google/pegasus-large' , from_pt=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.ones((1, 1) ) SCREAMING_SNAKE_CASE__ : List[str] = model(a_ ) self.assertIsNotNone(a_ ) @slow def __lowercase( self : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) SCREAMING_SNAKE_CASE__ : str = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) SCREAMING_SNAKE_CASE__ : List[str] = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] SCREAMING_SNAKE_CASE__ : str = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] SCREAMING_SNAKE_CASE__ : Dict = tokenizer(a_ , return_tensors='np' , truncation=a_ , max_length=512 , padding=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**a_ , num_beams=2 ).sequences SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) assert tgt_text == decoded
636
class OverFlowError(Exception):
    """Raised when enqueueing onto a queue that is already at capacity."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""


class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 = highest).

    Each level is a FIFO list capped at 100 elements; ``dequeue`` always
    drains the highest-priority non-empty level first.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level 0, 1 and 2.
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add ``data`` at the given priority (0, 1 or 2).

        Raises:
            ValueError: if ``priority`` is not 0, 1 or 2.
            OverFlowError: if the chosen level already holds 100 items.
        """
        try:
            if len(self.queues[priority]) >= 100:
                # NOTE: uses the module's custom OverFlowError consistently
                # (one raise site previously used the builtin OverflowError).
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest element of the highest-priority non-empty level.

        Raises:
            UnderFlowError: if every level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Priority queue in which the element's own value is its priority
    (smaller value = higher priority)."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        """Add ``data``; raises OverFlowError when 100 items are stored."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element.

        Raises:
            UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demonstrate FixedPriorityQueue ordering (the final dequeue raises
    UnderFlowError on purpose, as in the original demo)."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """Demonstrate ElementPriorityQueue (smallest-first) ordering; the final
    dequeue raises UnderFlowError on purpose, as in the original demo."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
636
1
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for constraints applied during constrained generation.

    A constraint tracks progress towards forcing a particular token pattern
    into the output. Subclasses must implement `advance`, `does_advance`,
    `update`, `reset`, `remaining` and `copy`; `test()` sanity-checks that
    repeatedly taking the advised token eventually fulfills the constraint.
    """

    def __init__(self):
        # Sanity-check the subclass implementation at construction time.
        self.test()

    def test(self):
        """Verify that following `advance()` suggestions fulfills the constraint."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token id(s) that would advance this constraint one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return True if generating `token_id` advances this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume `token_id`; return `(stepped, completed, reset)` booleans."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Forget all progress made so far."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many more steps are needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; if `stateful`, the copy keeps the current progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact token sequence appears in the output.

    Args:
        token_ids (`List[int]`): the ids that must be generated, in order.
    """

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint


class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences, used to know which
    tokens can legally continue a partial match."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the token ids that can follow `current_seq` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True when `current_seq` is a full candidate sequence (no continuation)."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Count complete sequences stored beneath `root`."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True if one candidate sequence is a strict prefix of another
        (fewer leaves than sequences)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled by generating any ONE of several token sequences.

    Args:
        nested_token_ids (`List[List[int]]`): the candidate sequences; completing
            any single one of them fulfills the constraint.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint


class ConstraintListState:
    """Tracks progress of a beam through a LIST of constraints.

    At any moment at most one constraint is "in progress"; the rest are either
    already complete or still pending.

    Args:
        constraints (`List[Constraint]`): the constraints that must all be
            fulfilled, in any order.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score of this state: full credit per completed constraint plus
        partial credit for the in-progress one."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the list of token ids that would advance some constraint,
        or None if there is nothing left to advance."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Re-initialize, then replay `token_ids` (if given) through `add`."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        """Consume one generated token; returns `(complete, stepped)`."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, we must restart — but only this particular
                # constraint is reset, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list. If there are no
                # pending constraints either, the full list of constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint — does this `token_id` step any pending constraint?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # Any progress at all means it is no longer a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # Nothing pending and nothing in progress: we must be complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate self.constraints themselves,
        # so the copy starts from initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
636
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _a(method):
    """Decorator that fires the accelerate offload hook before a forward call.

    If the instance carries an accelerate ``_hf_hook`` with a ``pre_forward``
    callback, it is invoked (to move weights onto the execution device) before
    delegating to ``method``. Returns ``method`` unchanged when accelerate is
    unavailable or older than 0.17.0 (hooks API not present).

    Args:
        method: the bound forward-like method to wrap.

    Returns:
        The wrapped method, or ``method`` itself when wrapping is unnecessary.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    # Fix: the original parsed the decorated function instead of the version
    # string, and referenced an undefined name `method`.
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
636
1
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : 
Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = scope SCREAMING_SNAKE_CASE__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2 def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : 
List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = model(a_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, 
inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" pass def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return 
SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), *get_values(a_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): SCREAMING_SNAKE_CASE__ : int = problem_type['title'] SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE__ : str = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that 
is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : int )-> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
636
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _a ( lowercase__ : int ): '''simple docstring''' if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ): return False return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule ) def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ ) if is_compiled: SCREAMING_SNAKE_CASE__ : Tuple = model SCREAMING_SNAKE_CASE__ : int = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ : Any = model.module if not keep_fpaa_wrapper: SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' ) SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ ) if original_forward is not None: while hasattr(lowercase__ , '__wrapped__' ): SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__ if forward == original_forward: break SCREAMING_SNAKE_CASE__ : Dict = forward if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ): convert_model(lowercase__ , to_transformer_engine=lowercase__ ) if is_compiled: SCREAMING_SNAKE_CASE__ : List[Any] = model SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model return model def _a ( ): '''simple docstring''' PartialState().wait_for_everyone() def _a ( lowercase__ 
: str , lowercase__ : Optional[Any] ): '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(lowercase__ , lowercase__ ) elif PartialState().local_process_index == 0: torch.save(lowercase__ , lowercase__ ) @contextmanager def _a ( **lowercase__ : str ): '''simple docstring''' for key, value in kwargs.items(): SCREAMING_SNAKE_CASE__ : int = str(lowercase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ): SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ ) if hasattr(lowercase__ , '__qualname__' ): return obj.__qualname__ if hasattr(lowercase__ , '__name__' ): return obj.__name__ return str(lowercase__ ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' for key, value in source.items(): if isinstance(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} ) merge_dicts(lowercase__ , lowercase__ ) else: SCREAMING_SNAKE_CASE__ : List[Any] = value return destination def _a ( lowercase__ : int = None ): '''simple docstring''' if port is None: SCREAMING_SNAKE_CASE__ : int = 2_95_00 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
636
1
from __future__ import annotations def _a ( lowercase__ : int | str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = str(lowercase__ ) return n == n[::-1] def _a ( lowercase__ : int = 1_00_00_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = 0 for i in range(1 , lowercase__ ): if is_palindrome(lowercase__ ) and is_palindrome(bin(lowercase__ ).split('b' )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
636
from __future__ import annotations def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ): '''simple docstring''' if len(lowercase__ ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(lowercase__ ) or left < -len(lowercase__ ) or right >= len(lowercase__ ) or right < -len(lowercase__ ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid] SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
636
1
from __future__ import annotations from collections.abc import Iterator class snake_case : def __init__( self : Union[str, Any] , a_ : int )-> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = value SCREAMING_SNAKE_CASE__ : Node | None = None SCREAMING_SNAKE_CASE__ : Node | None = None class snake_case : def __init__( self : Any , a_ : Node )-> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = tree def __lowercase( self : List[str] , a_ : Node | None )-> int: """simple docstring""" if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Any )-> Iterator[int]: """simple docstring""" yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
636
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _a ( lowercase__ : Any ): '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _a ( lowercase__ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _a ( lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [state.process_index] SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}''' def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _a ( lowercase__ : int ): '''simple docstring''' if 
state.is_main_process: SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device ) else: SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device ) SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' if state.num_processes != 2: return SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}''' def _a ( lowercase__ : int ): '''simple docstring''' if state.num_processes != 2: return SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}''' def _a ( lowercase__ : int ): '''simple docstring''' main() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = PartialState() state.print(f'''State: {state}''' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
636
1
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( lowercase__ : int = 50_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = [(i * (3 * i - 1)) // 2 for i in range(1 , lowercase__ )] for i, pentagonal_i in enumerate(lowercase__ ): for j in range(lowercase__ , len(lowercase__ ) ): SCREAMING_SNAKE_CASE__ : str = pentagonal_nums[j] SCREAMING_SNAKE_CASE__ : List[str] = pentagonal_i + pentagonal_j SCREAMING_SNAKE_CASE__ : Tuple = pentagonal_j - pentagonal_i if is_pentagonal(lowercase__ ) and is_pentagonal(lowercase__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
636
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE__ : Any = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : Any = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size SCREAMING_SNAKE_CASE__ : List[str] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : List[Any] = size SCREAMING_SNAKE_CASE__ : Tuple = do_normalize SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096] SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16} def __lowercase( self : Optional[Any] )-> str: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowercase( self : Dict )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' ) return raw_image 
@unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self ) @property def __lowercase( self : Dict )-> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE__ : List[Any] = 2048 SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __lowercase( self : Any )-> Tuple: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: 
# Test not batched input SCREAMING_SNAKE_CASE__ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : Any )-> Any: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 SCREAMING_SNAKE_CASE__ : int = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(a_ ): SCREAMING_SNAKE_CASE__ : Dict = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello' SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any = image_processor( a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : 
Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : str = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : int = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( a_ , 
return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 ) SCREAMING_SNAKE_CASE__ : Dict = 3 @property def __lowercase( self : Any )-> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched 
SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
636
1
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def _a ( lowercase__ : Optional[int] ): '''simple docstring''' def wrapper(*lowercase__ : str , **lowercase__ : List[str] ): SCREAMING_SNAKE_CASE__ : Optional[int] = timeit.default_timer() SCREAMING_SNAKE_CASE__ : Tuple = func(*lowercase__ , **lowercase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = timeit.default_timer() - starttime return delta SCREAMING_SNAKE_CASE__ : List[str] = func.__name__ return wrapper def _a ( lowercase__ : dict , lowercase__ : List[str]=1_00 , lowercase__ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = seq_shapes or {} for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : str = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(lowercase__ , _ArrayXD ): SCREAMING_SNAKE_CASE__ : Optional[Any] = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(lowercase__ , datasets.Value ): if v.dtype == "string": SCREAMING_SNAKE_CASE__ : int = 'The small grey turtle was surprisingly fast when challenged.' 
else: SCREAMING_SNAKE_CASE__ : Tuple = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(lowercase__ , datasets.Sequence ): while isinstance(lowercase__ , datasets.Sequence ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = v.feature SCREAMING_SNAKE_CASE__ : Any = seq_shapes[k] SCREAMING_SNAKE_CASE__ : Tuple = np.random.rand(*lowercase__ ).astype(v.dtype ) SCREAMING_SNAKE_CASE__ : Optional[Any] = data dummy_data.append((i, example) ) return dummy_data def _a ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Optional[Any]=1_00 , lowercase__ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = generate_examples(lowercase__ , num_examples=lowercase__ , seq_shapes=lowercase__ ) with ArrowWriter(features=lowercase__ , path=lowercase__ ) as writer: for key, record in dummy_data: SCREAMING_SNAKE_CASE__ : List[str] = features.encode_example(lowercase__ ) writer.write(lowercase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' ) SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.Dataset.from_file(filename=lowercase__ , info=datasets.DatasetInfo(features=lowercase__ ) ) return dataset
636
import heapq as hq import math from collections.abc import Iterator class snake_case : def __init__( self : str , a_ : str )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = str(id_ ) SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance} def __lt__( self : int , a_ : Tuple )-> Union[str, Any]: """simple docstring""" return self.key < other.key def __repr__( self : Any )-> Dict: """simple docstring""" return self.id def __lowercase( self : Optional[Any] , a_ : int )-> List[str]: """simple docstring""" self.neighbors.append(a_ ) def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = weight def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ): '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , lowercase__ ) graph[b - 1].add_edge(graph[a - 1] , lowercase__ ) def _a ( lowercase__ : list , lowercase__ : Vertex ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = [] for u in graph: SCREAMING_SNAKE_CASE__ : Dict = math.inf SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : int = graph[:] while q: SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ ) q.remove(lowercase__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE__ : int = u SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id] for i in range(1 , len(lowercase__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _a ( lowercase__ : list , lowercase__ : Vertex ): '''simple docstring''' for u in graph: SCREAMING_SNAKE_CASE__ : List[str] = math.inf SCREAMING_SNAKE_CASE__ : int = None SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ ) hq.heapify(lowercase__ ) while h: SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE__ : List[str] = u SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id] hq.heapify(lowercase__ ) for i in range(1 , len(lowercase__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _a ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
636
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowercase__ : Optional[int] , lowercase__ : List[str]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('module.cls_token', 'vit.embeddings.cls_token'), ('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('module.patch_embed.proj.bias', 
'vit.embeddings.patch_embeddings.projection.bias'), ('module.pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('module.norm.weight', 'layernorm.weight'), ('module.norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE__ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _a ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE__ : Any = '' else: SCREAMING_SNAKE_CASE__ : List[str] = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Tuple = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE__ : str = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE__ : int = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[-config.hidden_size :] def _a ( lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( 
lowercase__ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = [ 'module.fc.fc1.weight', 'module.fc.fc1.bias', 'module.fc.bn1.weight', 'module.fc.bn1.bias', 'module.fc.bn1.running_mean', 'module.fc.bn1.running_var', 'module.fc.bn1.num_batches_tracked', 'module.fc.fc2.weight', 'module.fc.fc2.bias', 'module.fc.bn2.weight', 'module.fc.bn2.bias', 'module.fc.bn2.running_mean', 'module.fc.bn2.running_var', 'module.fc.bn2.num_batches_tracked', 'module.fc.fc3.weight', 'module.fc.fc3.bias', ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = dct.pop(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = val def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTMSNConfig() SCREAMING_SNAKE_CASE__ : int = 10_00 SCREAMING_SNAKE_CASE__ : Optional[int] = 'datasets/huggingface/label-files' SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(lowercase__ , lowercase__ ) , 'r' ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Optional[int] = idalabel SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int = 3_84 SCREAMING_SNAKE_CASE__ : List[str] = 15_36 SCREAMING_SNAKE_CASE__ : Optional[int] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] = 10_24 SCREAMING_SNAKE_CASE__ : List[str] = 40_96 SCREAMING_SNAKE_CASE__ : Optional[int] = 24 SCREAMING_SNAKE_CASE__ : int = 16 SCREAMING_SNAKE_CASE__ : Tuple = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE__ : List[Any] = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int = 7 SCREAMING_SNAKE_CASE__ : List[str] = 10_24 SCREAMING_SNAKE_CASE__ : Any = 
40_96 SCREAMING_SNAKE_CASE__ : Dict = 24 SCREAMING_SNAKE_CASE__ : Tuple = 16 SCREAMING_SNAKE_CASE__ : int = 0.1 SCREAMING_SNAKE_CASE__ : Optional[int] = ViTMSNModel(lowercase__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['target_encoder'] SCREAMING_SNAKE_CASE__ : Tuple = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = create_rename_keys(lowercase__ , base_model=lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_q_k_v(lowercase__ , lowercase__ , base_model=lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE__ : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) SCREAMING_SNAKE_CASE__ : Dict = ViTImageProcessor( size=config.image_size , image_mean=lowercase__ , image_std=lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = image_processor(images=lowercase__ , return_tensors='pt' ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE__ : str = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert 
torch.allclose(last_hidden_state[:, 0, :3] , lowercase__ , atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
636
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the output of a NAND gate for two binary inputs.

    NAND is the negation of AND: the result is 0 only when both
    inputs are 1, and 1 otherwise.

    >>> nand_gate(0, 0)
    1
    >>> nand_gate(1, 1)
    0
    """
    # The gate outputs 1 exactly when at least one input is 0.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
636
1
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class snake_case(nn.Module):
    """Two parallel 2D-transformer blocks whose residual outputs are mixed.

    Each sub-transformer encodes a different slice of the conditioning
    sequence (the slices' lengths are given by ``condition_lengths``);
    at inference their residual outputs are blended with ``mix_ratio``.

    NOTE(review): parameter names below were reconstructed from the
    keyword arguments the original body forwarded to ``TransformeraDModel``
    (the obfuscated source gave every parameter the same name, which is a
    SyntaxError) — confirm against the upstream definition.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ) -> None:
        super().__init__()
        # Two independent transformers sharing one configuration.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be
        # combined during inference.
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0] + condition_lengths[1], num_features)`.
        self.condition_lengths = [77, 257]

        # Which transformer encodes which condition. E.g. `(1, 0)` means
        # `transformers[1](conditions[0])` and `transformers[0](conditions[1])`.
        self.transformer_index_for_condition = [1, 0]

    def __lowercase(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on their condition slices and mix the results.

        Returns a one-tuple ``(output_states,)`` when ``return_dict`` is
        falsy, otherwise a ``TransformeraDModelOutput``.
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # For each of the two transformers, pass the corresponding condition tokens.
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer adds on top of the input.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
636
from math import factorial, radians


def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate ``sin(angle_in_degrees)`` with a Maclaurin series.

    :param angle_in_degrees: angle in degrees (any real value; reduced mod 360)
    :param accuracy: number of series terms added after the leading ``x`` term
    :param rounded_values_count: decimal places of the returned value
    :return: the sine approximation rounded to ``rounded_values_count`` places

    >>> maclaurin_sin(0)
    0.0
    >>> maclaurin_sin(90)
    1.0
    """
    # Reduce the angle into [0, 360) so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    # sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
    result = angle_in_radians
    exponent = 3
    sign = -1
    for _ in range(accuracy):
        result += (sign * (angle_in_radians**exponent)) / factorial(exponent)
        sign = -sign  # One positive term and the next will be negative and so on...
        exponent += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
636
1
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart SCREAMING_SNAKE_CASE__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } SCREAMING_SNAKE_CASE__ : Dict = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( 
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE__ : str = bs[:] SCREAMING_SNAKE_CASE__ : List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE__ : Any = [chr(lowercase__ ) for n in cs] return dict(zip(lowercase__ , lowercase__ ) ) def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = set() SCREAMING_SNAKE_CASE__ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE__ : Tuple = char return pairs class snake_case ( UpperCamelCase_ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ['input_ids', 'attention_mask'] def __init__( self : Dict , a_ : Optional[int] , a_ : str , a_ : Union[str, Any]="replace" , a_ : Optional[int]="<s>" , a_ : int="</s>" , a_ : Optional[Any]="</s>" , a_ : List[str]="<s>" , a_ : Union[str, Any]="<unk>" , a_ : Union[str, Any]="<pad>" , a_ : Tuple="<mask>" , a_ : Optional[int]=False , **a_ : Union[str, Any] , )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else bos_token SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else eos_token SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else sep_token SCREAMING_SNAKE_CASE__ : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else cls_token SCREAMING_SNAKE_CASE__ : Dict = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else unk_token SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( errors=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , **a_ , ) with open(a_ , encoding='utf-8' ) as vocab_handle: SCREAMING_SNAKE_CASE__ : Any = json.load(a_ ) SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE__ : Dict = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE__ : Tuple = bytes_to_unicode() SCREAMING_SNAKE_CASE__ : int = {v: k for k, v in self.byte_encoder.items()} with open(a_ , encoding='utf-8' ) as merges_handle: SCREAMING_SNAKE_CASE__ : Optional[int] = merges_handle.read().split('\n' )[1:-1] SCREAMING_SNAKE_CASE__ : Any = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE__ : int = dict(zip(a_ , range(len(a_ ) ) ) ) SCREAMING_SNAKE_CASE__ : Any = {} SCREAMING_SNAKE_CASE__ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE__ : Any = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def __lowercase( self : Optional[Any] )-> int: """simple docstring""" return len(self.encoder ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __lowercase( self : str , a_ : int )-> Any: """simple docstring""" if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE__ : str = tuple(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_pairs(a_ ) if not pairs: return token while True: SCREAMING_SNAKE_CASE__ : List[str] = min(a_ , key=lambda a_ : self.bpe_ranks.get(a_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = bigram 
SCREAMING_SNAKE_CASE__ : List[str] = [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 while i < len(a_ ): try: SCREAMING_SNAKE_CASE__ : Optional[Any] = word.index(a_ , a_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE__ : str = j if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE__ : Tuple = tuple(a_ ) SCREAMING_SNAKE_CASE__ : Any = new_word if len(a_ ) == 1: break else: SCREAMING_SNAKE_CASE__ : str = get_pairs(a_ ) SCREAMING_SNAKE_CASE__ : str = ' '.join(a_ ) SCREAMING_SNAKE_CASE__ : Any = word return word def __lowercase( self : Optional[int] , a_ : str )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = [] for token in re.findall(self.pat , a_ ): SCREAMING_SNAKE_CASE__ : Dict = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(' ' ) ) return bpe_tokens def __lowercase( self : List[Any] , a_ : Any )-> Union[str, Any]: """simple docstring""" return self.encoder.get(a_ , self.encoder.get(self.unk_token ) ) def __lowercase( self : List[str] , a_ : Dict )-> Optional[Any]: """simple docstring""" return self.decoder.get(a_ ) def __lowercase( self : List[str] , a_ : Optional[int] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ''.join(a_ ) SCREAMING_SNAKE_CASE__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def __lowercase( self : Any , a_ : str , a_ : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not os.path.isdir(a_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : Dict = os.path.join( a_ , (filename_prefix + '-' if 
filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE__ : int = os.path.join( a_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(a_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=a_ , ensure_ascii=a_ ) + '\n' ) SCREAMING_SNAKE_CASE__ : List[Any] = 0 with open(a_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) SCREAMING_SNAKE_CASE__ : Tuple = token_index writer.write(' '.join(a_ ) + '\n' ) index += 1 return vocab_file, merge_file def __lowercase( self : Tuple , a_ : List[int] , a_ : Optional[List[int]] = None )-> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id] SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowercase( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False )-> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1] def __lowercase( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None )-> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def 
__lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=False , **a_ : Any )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a_ ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE__ : int = ' ' + text return (text, kwargs)
636
import math


def is_prime(number: int) -> bool:
    """Return ``True`` if ``number`` is prime.

    >>> is_prime(2)
    True
    >>> is_prime(9)
    False
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Only odd divisors up to sqrt(number) need to be checked.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime reached from ``value * factor``.

    Searches upward by default; pass ``desc=True`` to step downward
    instead. If the starting point is itself prime, the search restarts
    from the next integer, so the result is always a *different* prime.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        # Step direction depends on the optional ``desc`` keyword.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # Never return the starting point itself; move on to the next prime.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
636
1
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return ``num!`` computed recursively, memoized via ``lru_cache``.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError('Number should not be negative.')
    if num in (0, 1):
        # Base cases: 0! == 1! == 1.
        return 1
    return num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
636
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : 
Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = scope SCREAMING_SNAKE_CASE__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2 def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : 
List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = model(a_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, 
inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" pass def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return 
SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), *get_values(a_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): SCREAMING_SNAKE_CASE__ : int = problem_type['title'] SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE__ : str = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that 
is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : int )-> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
636
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule name -> public names; consumed by _LazyModule below. The
# obfuscated original bound this dict to a throwaway name while passing
# an undefined `_import_structure` to _LazyModule (NameError at import).
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply skip registering the modeling module.
    pass
else:
    # Extend the lazy-import map rather than rebinding a fresh variable.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
636
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    """Builds small BioGPT configs/inputs and runs shared model checks.

    Fixes vs. the original text: every parameter was named ``a_`` (duplicate
    parameter names — a SyntaxError) and every ``self.attr = value`` had been
    mangled into a dead local binding while later code reads ``self.batch_size``
    etc.; names are reconstructed from the surviving right-hand sides and call
    sites. The class name is restored because ``setUp`` below instantiates
    ``BioGptModelTester`` by that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a tiny BioGptConfig from the tester's hyper-parameters."""
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with and without an attention mask and check output shape."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check the causal-LM head produces per-token vocabulary logits."""
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Verify cached (past_key_values) decoding matches full-sequence decoding under a padding mask."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask: second half of the sequence is masked out
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids — it must not affect the
        # cached path since those positions are masked
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Verify cached decoding matches full decoding when several tokens are appended at once."""
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        """Run a full forward + backward pass, optionally with gradient checkpointing."""
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """Check c_proj weights are initialized with the GPT-style scaled std."""
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) form the common mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + BioGPT-specific model tests.

    Method names are restored to ``test_*`` so unittest/pytest actually discover
    them (the mangled ``__lowercase`` names were never run and shadowed each other).
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original attribute name was lost in mangling; the single
    # remaining `False` flag matches the upstream test file's `test_pruning`.
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released microsoft/biogpt checkpoint."""

    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
636
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Fix: the logger and this map were both assigned to the same mangled name
# (the second assignment clobbered the first); distinct names restored.
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa-XL models.

    Stores the hyper-parameters used to instantiate an XLM-RoBERTa-XL model;
    defaults match the facebook/xlm-roberta-xl checkpoint.

    Fixes vs. the original text: the base class was the undefined name
    ``UpperCamelCase_`` (NameError) while ``PretrainedConfig`` sat imported but
    unused; both classes in this module shared the name ``snake_case`` so the
    second shadowed the first; ``self.x = x`` assignments had been mangled into
    dead local bindings.
    """

    # Identifier PretrainedConfig uses to route auto-class lookups.
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
636
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random() def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ): '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = parent SCREAMING_SNAKE_CASE__ : List[Any] = batch_size SCREAMING_SNAKE_CASE__ : str = min_seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ : int = feature_size SCREAMING_SNAKE_CASE__ : str = padding_value SCREAMING_SNAKE_CASE__ : Any = sampling_rate SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize SCREAMING_SNAKE_CASE__ : int = num_mel_bins SCREAMING_SNAKE_CASE__ : int = hop_length SCREAMING_SNAKE_CASE__ : str = win_length SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function SCREAMING_SNAKE_CASE__ : List[str] 
= fmin SCREAMING_SNAKE_CASE__ : Dict = fmax SCREAMING_SNAKE_CASE__ : int = mel_floor SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask def __lowercase( self : Dict )-> Dict: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]: """simple docstring""" def _flatten(a_ : int ): return list(itertools.chain(*a_ ) ) if equal_length: SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]: """simple docstring""" if equal_length: SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Tuple = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = SpeechTaFeatureExtractor def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 
SpeechTaFeatureExtractionTester(self ) def __lowercase( self : Any , a_ : Optional[int] )-> List[str]: """simple docstring""" self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) ) def __lowercase( self : Tuple )-> Dict: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] 
) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 ) SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : int )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __lowercase( self : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] 
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : str = feat_extract( a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __lowercase( self : Any )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : List[str] = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : Dict )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: 
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : Tuple )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack! 
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ ) def __lowercase( self : str )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : str = min(a_ ) SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : int = feat_extract.pad( a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __lowercase( self : Optional[int] , a_ : List[str] )-> Any: """simple docstring""" from datasets import load_dataset SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def __lowercase( self : List[str] )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor( [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 
1.6_1_7_4e-0_3, 3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3, 2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4, 4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3, 7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4, 4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] ) # fmt: on SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 9_3680) ) self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) ) def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
636
1
def _a ( lowercase__ : int = 1_00_00_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , lowercase__ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
636
import math
import sys


def read_file_binary(lowercase__: str) -> str:
    """Read the file at ``lowercase__`` and return its bytes as a bit string.

    Exits the process with an error message if the file cannot be opened.
    """
    result = ""
    try:
        with open(lowercase__, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # Each byte becomes exactly 8 bits, zero-padded on the left.
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(lowercase__: str) -> str:
    """Decompress an LZW-style bit string back into the original bit string.

    The lexicon starts with the two single-bit codes and is rebuilt (keys
    zero-prefixed) every time its size crosses a power of two, mirroring the
    growing code width used by the compressor.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(lowercase__)):
        curr_string += lowercase__[i]
        if curr_string not in lexicon:
            # Keep accumulating bits until a known code is formed.
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the dictionary size hits a power of two the code width grew by
        # one bit, so every existing key gains a leading zero.
        # BUG FIX: the obfuscated source called the nonexistent ``math.loga``.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(lowercase__: str, to_write: str) -> None:
    """Pack the bit string ``to_write`` into bytes and write them to a file.

    A terminating '1' followed by zero-padding marks the end of the payload
    (the matching ``remove_prefix`` strips it on read).  Exits on I/O errors.
    """
    byte_length = 8
    try:
        with open(lowercase__, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            # NOTE(review): the last (marker) element is intentionally not
            # written here, matching the upstream implementation's behavior.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(lowercase__: str) -> str:
    """Strip the length-prefix header (leading zeros up to the first '1')."""
    counter = 0
    for letter in lowercase__:
        if letter == "1":
            break
        counter += 1

    # NOTE(review): two successive slices, as in the upstream source — the
    # second removes ``counter + 1`` further characters after the marker bit.
    data_bits = lowercase__[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(lowercase__: str, lowercase__a: str) -> None:
    """Read ``lowercase__``, decompress its contents, write to ``lowercase__a``."""
    data_bits = read_file_binary(lowercase__)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(lowercase__a, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
636
1
from math import ceil def _a ( lowercase__ : int = 10_01 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): SCREAMING_SNAKE_CASE__ : List[Any] = 2 * i + 1 SCREAMING_SNAKE_CASE__ : Tuple = 2 * i SCREAMING_SNAKE_CASE__ : List[str] = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: SCREAMING_SNAKE_CASE__ : Optional[int] = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number")
636
def _a ( lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} ) SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'} for i in range(len(lowercase__ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(lowercase__ ) == 0 def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' ) if is_balanced(lowercase__ ): print(lowercase__ , 'is balanced' ) else: print(lowercase__ , 'is not balanced' ) if __name__ == "__main__": main()
636
1
def bfs(graph, source, sink, parent):
    """Breadth-first search over the residual ``graph``.

    Fills ``parent`` (mutated in place) with the predecessor of every vertex
    reached from ``source`` via edges with remaining capacity.

    :return: True iff ``sink`` is reachable from ``source``.
    """
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Only follow edges that still have residual capacity.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from ``source`` to ``sink``.

    Repeatedly finds an augmenting path with BFS (Edmonds-Karp style),
    pushes the bottleneck capacity along it and updates the residual graph.
    NOTE: ``graph`` (adjacency matrix of capacities) is mutated in place.
    """
    parent = [-1] * len(graph)
    max_flow = 0

    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities: subtract along the path, add backwards.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
636
# NOTE(review): machine-obfuscated test module for the Pegasus tokenizers.
# Identifier names were mangled (class -> snake_case, methods -> __lowercase,
# locals -> SCREAMING_SNAKE_CASE__, arguments -> a_), so many assignments bind
# to throwaway names and several references look dangling.  Code tokens are
# kept byte-identical; only comments are added.
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

# Path to a small SentencePiece fixture model (no BOS token) used in setUp.
SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
    # Tokenizer classes under test plus the mixin's slow/fast switches.
    lowercase_ = PegasusTokenizer
    lowercase_ = PegasusTokenizerFast
    lowercase_ = True
    lowercase_ = True

    def __lowercase( self : int )-> List[Any]:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __lowercase( self : Optional[Any] )-> Optional[int]:
        """simple docstring"""
        # Hub-hosted reference tokenizer, cached for the whole test class.
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer:
        """simple docstring"""
        # Reload the tokenizer saved by setUp from the temporary directory.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )

    def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]:
        """simple docstring"""
        # Input/output text pair consumed by the common tokenizer test mixin.
        return ("This is a test", "This is a test")

    def __lowercase( self : Optional[int] )-> Dict:
        """simple docstring"""
        # '</s>' (EOS) must round-trip through token <-> id conversion as id 1.
        SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
        SCREAMING_SNAKE_CASE__ : Any = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )

    def __lowercase( self : Dict )-> Any:
        """simple docstring"""
        # First/last vocab entries and total size of the fixture vocabulary.
        SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(a_ ) , 1103 )

    def __lowercase( self : Optional[Any] )-> List[Any]:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def __lowercase( self : List[Any] )-> Union[str, Any]:
        """simple docstring"""
        # Slow (Python) and fast (Rust) tokenizers must agree on ids, including
        # the special tokens <mask_1>/<mask_2> and unknown-token variants.
        SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__ : Tuple = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
        self.assertListEqual(a_ , a_ )

    def __lowercase( self : Any )-> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
        self.assertListEqual(a_ , a_ )

    def __lowercase( self : int )-> int:
        """simple docstring"""
        # Sanity checks on the pretrained tokenizer's fixed constants.
        SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.'
        SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
        self.assertListEqual(a_ , a_ )

        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def __lowercase( self : Union[str, Any] )-> Optional[Any]:
        """simple docstring"""
        # Batch encoding: inputs pad/truncate to the 1024-token model maximum,
        # targets are capped at max_length=5.
        SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example']
        SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny']
        SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
        SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
            text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a_ ) == 2  # input_ids, attention_mask.

    @slow
    def __lowercase( self : Any )-> str:
        """simple docstring"""
        # Golden integration encoding, pinned to a specific Hub revision.
        # fmt: off
        SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )


# Second test class: BigBird-Pegasus variant (offset=0, [MASK] mask token).
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
    lowercase_ = PegasusTokenizer
    lowercase_ = PegasusTokenizerFast
    lowercase_ = True
    lowercase_ = True

    def __lowercase( self : Any )-> Union[str, Any]:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __lowercase( self : Optional[Any] )-> List[str]:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )

    def __lowercase( self : Optional[Any] , a_ : Tuple )-> str:
        """simple docstring"""
        return ("This is a test", "This is a test")

    def __lowercase( self : str )-> str:
        """simple docstring"""
        # Slow/fast equivalence with this variant's [MASK] special token.
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__ : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
        SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
        self.assertListEqual(a_ , a_ )

    @require_torch
    def __lowercase( self : List[str] )-> Union[str, Any]:
        """simple docstring"""
        # Batch encoding: this variant's model maximum is 4096 tokens.
        SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
        SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
        SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
        SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(
            text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a_ ) == 2  # input_ids, attention_mask.

    def __lowercase( self : Dict )-> str:
        """simple docstring"""
        # Golden ids matching the original TensorFlow implementation.
        SCREAMING_SNAKE_CASE__ : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids
        self.assertListEqual(
            a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
636
1
# Alphabet used by the Vigenere cipher; characters outside it pass through.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main():
    """Interactive entry point: prompt for message/key/mode and print result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(lowercase__: str, lowercase__a: str) -> str:
    """Encrypt ``lowercase__a`` with the Vigenere cipher using key ``lowercase__``."""
    return translate_message(lowercase__, lowercase__a, "encrypt")


def decrypt_message(lowercase__: str, lowercase__a: str) -> str:
    """Decrypt ``lowercase__a`` with the Vigenere cipher using key ``lowercase__``."""
    return translate_message(lowercase__, lowercase__a, "decrypt")


def translate_message(lowercase__: str, lowercase__a: str, lowercase__b: str) -> str:
    """Shift each letter of the message by the corresponding key letter.

    Case is preserved; non-alphabetic characters are copied unchanged and do
    not advance the key position.

    :param lowercase__: the cipher key (letters only are meaningful).
    :param lowercase__a: the message to translate.
    :param lowercase__b: either "encrypt" or "decrypt".
    """
    translated = []
    key_index = 0
    key = lowercase__.upper()

    for symbol in lowercase__a:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if lowercase__b == "encrypt":
                num += LETTERS.find(key[key_index])
            elif lowercase__b == "decrypt":
                num -= LETTERS.find(key[key_index])

            # BUG FIX: wrap within the alphabet (was len() of a parameter).
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            # BUG FIX: cycle over the key length (was len() of a parameter).
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
636
def _a ( lowercase__ : int = 1_00_00_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , lowercase__ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
636
1
from __future__ import annotations


def all_construct(lowercase__: str, lowercase__a: list[str] | None = None) -> list[list[str]]:
    """Return every way to build ``lowercase__`` by concatenating words
    from ``lowercase__a`` (words may be reused).

    Tabulation DP: table[i] holds all (reversed) combinations that spell the
    first i characters of the target.

    :param lowercase__: the target string to construct.
    :param lowercase__a: the word bank (defaults to empty).
    :return: list of word lists; [[]] when the target is empty.
    """
    word_bank = lowercase__a or []
    # table[i] collects the combinations producing target[:i].
    table_size = len(lowercase__) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # Seed value: the empty string has exactly one (empty) construction.
    table[0] = [[]]

    # BUG FIX: iterate over the table indices; the obfuscated source passed
    # the target *string* to range(), which raises TypeError.
    for i in range(table_size):
        if table[i] != []:
            for word in word_bank:
                # Word must match the target exactly at position i.
                if lowercase__[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # Every combination reaching i now also reaches i+len(word).
                    table[i + len(word)] += new_combinations

    # Combinations were built back-to-front; reverse each for readable output.
    for combination in table[len(lowercase__)]:
        combination.reverse()

    return table[len(lowercase__)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
636
"""Convert ViLT checkpoints from the original repository to HuggingFace format.

URL: https://github.com/dandelin/ViLT
"""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build (old_name, new_name) pairs mapping original checkpoint keys to HF keys."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the generic classification head weights (not used by the HF model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our ViLT structure,
    run a sanity-check forward pass, and save model + processor.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        # NLVR2 feeds two images, so three modality token types are required
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
636
1
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the partition indexed by *positive_integer* is perfect.

    A partition is perfect when ``(sqrt(4 * n + 1) + 1) / 2`` is an exact
    power of two, i.e. its base-2 logarithm is an integer.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Project Euler 207: find the smallest partition value for which the
    proportion of perfect partitions first drops below *max_proportion*.
    """
    total_partitions = 0
    perfect_partitions = 0

    # candidates come from odd integers: (integer**2 - 1) / 4 is whole
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return partition_candidate
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
636
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """A batched pinhole camera whose parameters stay differentiable."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # all frame vectors must share one batch dimension and be 3-vectors
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Return ``[width, height]`` as a float32 tensor."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Return ``[x_fov, y_fov]`` as a float32 tensor."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2), column-then-row order
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """Rays for every pixel, shaped (batch, inner * H * W, 2, 3)."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map pixel coords to (origin, direction) pairs; output shape (*coords.shape[:-1], 2, 3)."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # normalize pixel coords to [-1, 1], then scale by the half-FOV tangent
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            # bug fix: `shape` has no default, omitting it raised TypeError
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build 20 cameras on a circle looking at the origin, each size x size."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
636
1
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3RT / M) in m/s.

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass in kg/mol (must be > 0)
    :raises ValueError: for a negative temperature or non-positive molar mass
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
636
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the titles of the current top BBC News articles.

    :param bbc_news_api_key: a newsapi.org API key
    """
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
636
1
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Decorator: tag *func* with a single key code so ``register`` can dispatch on it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Decorator: tag *func* with several key codes so ``register`` can dispatch on them."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects every marked method of a class into a
    ``key_handler`` dispatch table and installs ``handle_input``."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and invoke the matching handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild *cls* with the ``KeyHandler`` metaclass applied."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
636
"""Convert RegNet / SEER checkpoints (timm and VISSL) to HuggingFace RegNet."""

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Record the leaf modules a forward pass actually executes."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # leaves, convs and batchnorms are the parameter-carrying ops we care about
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copy weights from ``src`` to ``dest`` by pairing their traced operations."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Wrap a classy-vision RegNet trunk so `get_trunk_forward_outputs` can run it."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Dict of checkpoint loaders; unknown names fall back to a timm model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Pick the HF class: bare SEER backbones get `RegNetModel`, the rest a classifier."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy selected head tensors from the vissl state dict into ours."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Transfer one checkpoint, verify outputs match, and optionally push to the Hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # SEER trunks have no head, so the traced op counts can legitimately differ
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named RegNet checkpoint, or every known one when *model_name* is None."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
636
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; when the optional backend is missing, fall back
# to the dummy objects (which raise a helpful ImportError on first use).
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

# NOTE(review): obfuscation damage — every module-level name in this file was
# rewritten to `SCREAMING_SNAKE_CASE__`, but later code still references the
# original names (`MTaTokenizer`, `MTaTokenizerFast`, `_import_structure`),
# which are therefore unbound here. Confirm against the upstream
# `transformers/models/mt5/__init__.py` before relying on this module.
SCREAMING_SNAKE_CASE__ : Optional[Any] = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

SCREAMING_SNAKE_CASE__ : List[str] = TaTokenizerFast

# Import map consumed by `_LazyModule`: submodule name -> public symbols.
SCREAMING_SNAKE_CASE__ : Optional[int] = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

# Register the PyTorch symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : str = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

# TensorFlow symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

# Flax symbols.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[str] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # each submodule on first attribute access.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
636
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'OwlViTImageProcessor' lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(a_ , a_ ) def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' 
) if text is not None: if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )): SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )] elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ): SCREAMING_SNAKE_CASE__ : Any = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ : str = max([len(a_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(a_ ) != max_num_queries: SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(a_ )) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ ) encodings.append(a_ ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 ) else: raise ValueError('Target return tensor type could not 
be returned' ) SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding() SCREAMING_SNAKE_CASE__ : List[str] = input_ids SCREAMING_SNAKE_CASE__ : Tuple = attention_mask if query_images is not None: SCREAMING_SNAKE_CASE__ : Any = BatchEncoding() SCREAMING_SNAKE_CASE__ : Dict = self.image_processor( a_ , return_tensors=a_ , **a_ ).pixel_values SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]: """simple docstring""" return self.image_processor.post_process(*a_ , **a_ ) def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*a_ , **a_ ) def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*a_ , **a_ ) def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Tuple )-> Any: """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' 
, a_ , ) return self.image_processor_class @property def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , ) return self.image_processor
636
1
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) # TODO Update this SCREAMING_SNAKE_CASE__ : Tuple = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class snake_case ( UpperCamelCase_ ): lowercase_ = 'esm' def __init__( self : int , a_ : Dict=None , a_ : Any=None , a_ : List[Any]=None , a_ : Dict=768 , a_ : int=12 , a_ : Optional[Any]=12 , a_ : Tuple=3072 , a_ : Dict=0.1 , a_ : Any=0.1 , a_ : List[str]=1026 , a_ : List[Any]=0.02 , a_ : Optional[Any]=1e-1_2 , a_ : int="absolute" , a_ : List[str]=True , a_ : List[Any]=None , a_ : str=False , a_ : Any=False , a_ : List[str]=None , a_ : str=None , **a_ : List[str] , )-> List[str]: """simple docstring""" super().__init__(pad_token_id=a_ , mask_token_id=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : Dict = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE__ : str = position_embedding_type SCREAMING_SNAKE_CASE__ : Dict = use_cache SCREAMING_SNAKE_CASE__ : List[str] = emb_layer_norm_before SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_dropout SCREAMING_SNAKE_CASE__ : Dict = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' 
) SCREAMING_SNAKE_CASE__ : List[Any] = EsmFoldConfig() elif isinstance(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[str] = EsmFoldConfig(**a_ ) SCREAMING_SNAKE_CASE__ : Any = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) SCREAMING_SNAKE_CASE__ : int = get_default_vocab_list() else: SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_list else: SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : List[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , a_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def __lowercase( self : Union[str, Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = super().to_dict() if isinstance(self.esmfold_config , a_ ): SCREAMING_SNAKE_CASE__ : int = self.esmfold_config.to_dict() return output @dataclass class snake_case : lowercase_ = None lowercase_ = True lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = 0 lowercase_ = True lowercase_ = False lowercase_ = 128 lowercase_ = None def __lowercase( self : List[str] )-> Dict: """simple docstring""" if self.trunk is None: SCREAMING_SNAKE_CASE__ : str = TrunkConfig() elif isinstance(self.trunk , a_ ): SCREAMING_SNAKE_CASE__ : Dict = TrunkConfig(**self.trunk ) def __lowercase( self : List[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = asdict(self ) SCREAMING_SNAKE_CASE__ : List[str] = self.trunk.to_dict() return output @dataclass class snake_case : lowercase_ = 48 lowercase_ = 1_024 lowercase_ = 128 lowercase_ = 32 lowercase_ = 32 lowercase_ = 32 lowercase_ = 0 lowercase_ = 0 lowercase_ = False lowercase_ = 4 lowercase_ = 128 lowercase_ = None def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" if self.structure_module is None: SCREAMING_SNAKE_CASE__ : Dict = StructureModuleConfig() elif isinstance(self.structure_module 
, a_ ): SCREAMING_SNAKE_CASE__ : Dict = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) SCREAMING_SNAKE_CASE__ : int = self.sequence_state_dim // self.sequence_head_width SCREAMING_SNAKE_CASE__ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def __lowercase( self : Union[str, Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = asdict(self ) SCREAMING_SNAKE_CASE__ : List[str] = self.structure_module.to_dict() return output @dataclass class snake_case : lowercase_ = 384 lowercase_ = 128 lowercase_ = 16 lowercase_ = 128 lowercase_ = 12 lowercase_ = 4 lowercase_ = 8 lowercase_ = 0.1 lowercase_ = 8 
lowercase_ = 1 lowercase_ = 2 lowercase_ = 7 lowercase_ = 10 lowercase_ = 1e-8 lowercase_ = 1e5 def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return asdict(self ) def _a ( ): '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
636
class OverFlowError(Exception):
    """Raised when an element is added to a queue that is already full."""


class UnderFlowError(Exception):
    """Raised when an element is requested from an empty queue."""


class FixedPriorityQueue:
    """Priority queue with three fixed priority levels.

    Priority 0 is the highest.  Elements of the same priority are served
    first-in-first-out; each priority level holds at most 100 elements.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level (index == priority).
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add ``data`` at the given ``priority`` (0, 1 or 2).

        Raises:
            OverFlowError: if the chosen priority level already holds 100 items.
            ValueError: if ``priority`` is not a valid level.
        """
        try:
            if len(self.queues[priority]) >= 100:
                # Fix: the fixed-size queue previously raised the *builtin*
                # OverflowError while ElementPriorityQueue raised the custom
                # OverFlowError; both now use the custom exception.
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest element of the highest non-empty priority level.

        Raises:
            UnderFlowError: if every priority level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Priority queue where the smallest element has the highest priority.

    Holds at most 100 elements.
    """

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        """Append ``data``; raises OverFlowError when the queue is full."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element.

        Raises:
            UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demonstrate FixedPriorityQueue; the 10th dequeue of 9 items underflows."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())  # deliberately raises UnderFlowError


def element_priority_queue() -> None:
    """Demonstrate ElementPriorityQueue; the 10th dequeue of 9 items underflows."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())  # deliberately raises UnderFlowError


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
636
1
import os

import pytest

from attr import dataclass


SCREAMING_SNAKE_CASE__ : int = "us-east-1"  # defaults region


# Configuration bundle shared by the SageMaker integration tests.
# NOTE(review): obfuscation damage — every attribute was renamed to
# `lowercase_` (so each assignment overwrites the previous one) and every
# property to `__lowercase`; the original names the bodies still reference
# (`self.framework`, `hyperparameters`, `request`, `SageMakerTestEnvironment`)
# are unbound here. Confirm against the upstream `tests/sagemaker/conftest.py`.
@dataclass
class snake_case:
    lowercase_ = 42
    # IAM role the training job assumes.
    lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    # Default HF Trainer arguments for the MNLI smoke test.
    lowercase_ = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5_500,
    }
    # Same arguments with a longer run, presumably for distributed tests.
    lowercase_ = {**hyperparameters, 'max_steps': 1_000}

    @property
    def __lowercase(self: Optional[Any]) -> str:
        """Regex metric definitions SageMaker scrapes from the training logs,
        framework-dependent (PyTorch Trainer vs Keras log format)."""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def __lowercase(self: int) -> str:
        """Base job name ("transfromers" typo is present in the original string)."""
        return F'''{self.framework}-transfromers-test'''

    @property
    def __lowercase(self: Optional[int]) -> str:
        """Path to the framework-specific training scripts."""
        return F'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def __lowercase(self: Optional[int]) -> str:
        """Deep Learning Container image URI used for training."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


# Class-scoped fixture: attaches a fresh test environment for the test class.
@pytest.fixture(scope='class')
def _a(lowercase__: Tuple):
    SCREAMING_SNAKE_CASE__ : List[str] = SageMakerTestEnvironment(framework=request.cls.framework)
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _a(lowercase__: List[str]):
    """Decorator that, when Accelerate >= 0.17.0 is installed, wraps a method so
    that any attached Accelerate hook's ``pre_forward`` runs before the method.

    NOTE(review): obfuscation damage — the wrapped callable was presumably the
    parameter (here renamed ``lowercase__``), so ``method`` is unbound; the
    version comparison below presumably should parse the computed base version,
    not the callable; and ``wrapper`` declares ``*lowercase__``/``**lowercase__``
    with the same name (SyntaxError). Confirm against the upstream helper.
    """
    if not is_accelerate_available():
        # Without accelerate there is nothing to hook; return unchanged.
        return method
    # Installed accelerate version without local/dev suffixes.
    SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__).base_version
    # Hooks only expose `pre_forward` from accelerate 0.17.0 onward.
    if version.parse(lowercase__) < version.parse('0.17.0'):
        return method

    def wrapper(self: Optional[int], *lowercase__: int, **lowercase__: Tuple):
        # Fire the pre-forward hook (e.g. moving weights onto device) first.
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *lowercase__, **lowercase__)

    return wrapper
636
1
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


# NOTE(review): obfuscation damage — the skip condition presumably tests
# `DEFAULT_REPO` (imported above) rather than the unbound `UpperCamelCase_`;
# the cached property was presumably named `resolver` (both tests call
# `self.resolver`); `a_` and `mmeta` are unbound. Confirm against the upstream
# `test_tatoeba_conversion.py`.
@unittest.skipUnless(os.path.exists(UpperCamelCase_), 'Tatoeba directory does not exist.')
class snake_case(unittest.TestCase):
    @cached_property
    def __lowercase(self: str) -> Optional[int]:
        """Build a TatoebaConverter that writes into a throwaway temp dir."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=a_)

    @slow
    def __lowercase(self: Union[str, Any]) -> Dict:
        """Smoke-test converting a single language pair."""
        self.resolver.convert_models(['heb-eng'])

    @slow
    def __lowercase(self: Optional[int]) -> List[str]:
        """Dry-run the model card and check the language-pair metadata."""
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = self.resolver.write_model_card('opus-mt-he-en', dry_run=a_)
        assert mmeta["long_pair"] == "heb-eng"
636
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _a ( lowercase__ : int ): '''simple docstring''' if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ): return False return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule ) def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ ) if is_compiled: SCREAMING_SNAKE_CASE__ : Tuple = model SCREAMING_SNAKE_CASE__ : int = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ : Any = model.module if not keep_fpaa_wrapper: SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' ) SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ ) if original_forward is not None: while hasattr(lowercase__ , '__wrapped__' ): SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__ if forward == original_forward: break SCREAMING_SNAKE_CASE__ : Dict = forward if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ): convert_model(lowercase__ , to_transformer_engine=lowercase__ ) if is_compiled: SCREAMING_SNAKE_CASE__ : List[Any] = model SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model return model def _a ( ): '''simple docstring''' PartialState().wait_for_everyone() def _a ( lowercase__ 
: str , lowercase__ : Optional[Any] ): '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(lowercase__ , lowercase__ ) elif PartialState().local_process_index == 0: torch.save(lowercase__ , lowercase__ ) @contextmanager def _a ( **lowercase__ : str ): '''simple docstring''' for key, value in kwargs.items(): SCREAMING_SNAKE_CASE__ : int = str(lowercase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ): SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ ) if hasattr(lowercase__ , '__qualname__' ): return obj.__qualname__ if hasattr(lowercase__ , '__name__' ): return obj.__name__ return str(lowercase__ ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' for key, value in source.items(): if isinstance(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} ) merge_dicts(lowercase__ , lowercase__ ) else: SCREAMING_SNAKE_CASE__ : List[Any] = value return destination def _a ( lowercase__ : int = None ): '''simple docstring''' if port is None: SCREAMING_SNAKE_CASE__ : int = 2_95_00 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
636
1
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... (up to the 999,999th).

    Fix: all three functions in this file had been renamed to the colliding
    `_a` while their call sites still used the original names, so calling any
    of them raised NameError; the grounded names are restored.
    """
    for n in range(1, 1_00_00_00):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of ``n``.

    Trial-divides up to sqrt(n); the divisor count is the product of
    (multiplicity + 1) over the prime factorisation.
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # A remaining factor > sqrt(original n) is prime: multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors.

    Project Euler problem 12.  (Also fixes `count_divisors` being called on an
    unbound name instead of the generator variable.)
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)


if __name__ == "__main__":
    print(solution())
636
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of ``nums[left] .. nums[right]`` by divide and conquer.

    Negative indices are accepted within ``[-len(nums), len(nums) - 1]``.

    Fix: the function had been renamed to `_a` with all three parameters
    mangled to the duplicate name `lowercase__` (a SyntaxError), while the body
    and the recursive calls still used `nums`/`left`/`right`/`find_max`; the
    grounded names are restored.

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if ``left`` or ``right`` is outside the valid index range.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
636
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Import map consumed by `_LazyModule`: submodule name -> public symbols.
# NOTE(review): obfuscation damage — the module-level names were rewritten to
# `SCREAMING_SNAKE_CASE__`, yet `_LazyModule` below is still passed
# `_import_structure`, which is unbound here; the `Union`/`List` annotations
# are likewise not imported. Confirm against the upstream
# `transformers/models/trocr/__init__.py`.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# PyTorch symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[Any] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # each submodule on first attribute access.
    SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
636
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-process smoke tests for accelerate's cross-process tensor operations.

Fix: all eight functions in this file had been renamed to the colliding `_a`
while `main()` (and `test_gather`) still called them by their real names,
raising NameError; the names grounded by those call sites are restored.
"""
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    """Return a length-``num_processes`` float tensor unique to this rank.

    Rank ``r`` holds ``[r*n + 1, ..., r*n + n]`` on the state's device.
    """
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    """gather() must concatenate every rank's tensor in rank order."""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    """gather_object() must collect one Python object per rank, in rank order."""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    """broadcast() must overwrite every rank with rank 0's tensor."""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    """pad_across_processes() must zero-pad shorter ranks to the longest length."""
    # The main process deliberately has one extra element.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    """reduce(..., "sum") must elementwise-sum across ranks (2-process case only)."""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    """reduce(..., "mean") must elementwise-average across ranks (2-process case only)."""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # Spawn entry point (e.g. under XLA). NOTE(review): the original name was
    # destroyed by obfuscation; `_mp_fn` is the conventional accelerate test
    # hook — confirm against upstream.
    main()


def main():
    """Run every operation test, printing progress from the main process."""
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
636
1
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    """Smoke tests for the PyTorch benchmark utilities.

    The original file defined every method under the same name
    ``__lowercase`` (so all but the last were shadowed) and passed the
    undefined name ``a_`` as keyword-argument values; distinct method
    names and concrete flag values are restored here, and the repeated
    argument construction is factored into ``_benchmark_args``.
    """

    def _benchmark_args(self, model_id, **overrides):
        # Shared defaults for every test; individual tests override flags.
        kwargs = dict(
            models=[model_id],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        kwargs.update(overrides)
        return PyTorchBenchmarkArguments(**kwargs)

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) cell must hold a measurement.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        benchmark = PyTorchBenchmark(self._benchmark_args("sshleifer/tiny-gpt2"))
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        benchmark = PyTorchBenchmark(
            self._benchmark_args("sgugger/tiny-distilbert-classification", only_pretrain_model=True)
        )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        benchmark = PyTorchBenchmark(self._benchmark_args("sshleifer/tiny-gpt2", torchscript=True))
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        benchmark = PyTorchBenchmark(self._benchmark_args("sshleifer/tiny-gpt2", fp16=True))
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        model_id = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(model_id)
        # set architectures equal to `None`
        config.architectures = None
        benchmark = PyTorchBenchmark(self._benchmark_args(model_id), configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        benchmark = PyTorchBenchmark(
            self._benchmark_args("sshleifer/tiny-gpt2", training=True, inference=False)
        )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        benchmark = PyTorchBenchmark(
            self._benchmark_args("sshleifer/tiny-gpt2", training=True, inference=False, fp16=True)
        )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        model_id = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(model_id)
        benchmark = PyTorchBenchmark(self._benchmark_args(model_id), configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        model_id = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(model_id)
        benchmark = PyTorchBenchmark(self._benchmark_args(model_id), configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        model_id = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(model_id)
        benchmark = PyTorchBenchmark(
            self._benchmark_args(model_id, training=True, inference=False), configs=[config]
        )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        model_id = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(model_id)
        benchmark = PyTorchBenchmark(
            self._benchmark_args(model_id, training=True, inference=False), configs=[config]
        )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        model_id = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = self._benchmark_args(
                model_id,
                training=True,
                save_to_csv=True,
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
            )
            PyTorchBenchmark(benchmark_args).run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        model_id = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = self._benchmark_args(
                model_id,
                training=True,
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
636
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class PixaStructImageProcessingTester(unittest.TestCase):
    """Shared fixtures/configuration for the Pix2Struct image-processor tests.

    The original file named all three classes ``snake_case`` (so they
    shadowed each other) and passed the undefined name ``a_`` as keyword
    values; distinct class names and concrete values are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        # Fixed reference image so mean-value expectations are stable.
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _expected_hidden_dim(self):
        # Each flattened patch carries two positional ids in front of the pixels.
        return (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(
            torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3)
        )

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = self._expected_hidden_dim()

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = self._expected_hidden_dim()

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Without header text the VQA processor must refuse the input.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = self._expected_hidden_dim()

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = self._expected_hidden_dim()

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # The alpha channel is dropped, hence ``num_channels - 1``.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
636
1
"""Convert original MobileViT checkpoints into the HuggingFace format.

The original file defined every function under the same name ``_a`` (so
``convert_movilevit_checkpoint`` at the bottom raised ``NameError``) and
assigned the results of ``name.replace(...)`` to throwaway variables, so
no key was ever actually renamed; the working form is restored here.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the named original checkpoint."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    """Map an original MobileViT parameter name onto the HF naming scheme."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    # Stages 0-1 are plain MobileNet blocks; insert the extra ``layer`` level.
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    # Stages 2-5 are MobileViT blocks with a downsampling layer in front.
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rename all keys and split fused qkv projections into q/k/v tensors."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


def prepare_img():
    """Load the standard COCO test image used to verify model outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our MobileViT
    structure, verify the outputs on a reference image, and save (and
    optionally push) the converted model.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
636
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex carrying the bookkeeping fields Prim's algorithm needs."""

    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None  # cheapest known edge weight connecting this vertex to the tree
        self.pi = None  # predecessor in the minimum spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex_id: weight}

    def __lt__(self, other) -> bool:
        # Ordering by key lets min() / heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Store the weight of the edge towards *vertex*."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    """Create an undirected edge of weight *edge* between 1-indexed vertices a and b."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm, list-based O(V^2) variant.

    Returns the MST as a list of 1-indexed ``(vertex, predecessor)`` pairs.
    Mutates each vertex's ``key``/``pi`` fields while it runs.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the frontier vertex with the smallest key (linear scan).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm, heap-based variant.

    Yields the MST as 1-indexed ``(vertex, predecessor)`` pairs.
    Mutates each vertex's ``key``/``pi`` fields while it runs.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # A key just changed, so the heap invariant must be restored.
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-driven examples (kept from the original module)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
636
1
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : str = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = XLMRobertaTokenizer lowercase_ = XLMRobertaTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Any )-> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaTokenizer(a_ , keep_accents=a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Union[str, Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = '<pad>' SCREAMING_SNAKE_CASE__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 1002 ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaTokenizer(a_ , keep_accents=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize('This is a test' ) self.assertListEqual(a_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __lowercase( self : Tuple )-> Optional[int]: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE__ : Union[str, Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer_class.from_pretrained(a_ , **a_ ) SCREAMING_SNAKE_CASE__ : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : Any = tokenizer_r.save_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 
tokenizer_p.save_pretrained(a_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE__ : List[str] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(a_ , a_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(a_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE__ : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : Any = tokenizer_r.save_pretrained(a_ , legacy_format=a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer_p.save_pretrained(a_ ) # Checks it save with the same files self.assertSequenceEqual(a_ , a_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : int = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) shutil.rmtree(a_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE__ : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_r.save_pretrained(a_ , legacy_format=a_ ) SCREAMING_SNAKE_CASE__ : int = tokenizer_p.save_pretrained(a_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_r.from_pretrained(a_ ) 
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) shutil.rmtree(a_ ) @cached_property def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' ) def __lowercase( self : int )-> Optional[int]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(a_ , f.name ) SCREAMING_SNAKE_CASE__ : int = XLMRobertaTokenizer(f.name , keep_accents=a_ ) SCREAMING_SNAKE_CASE__ : Dict = pickle.dumps(a_ ) pickle.loads(a_ ) def __lowercase( self : Union[str, Any] )-> Dict: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : List[Any] = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(a_ ) SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __lowercase( self : int )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'Hello World!' 
SCREAMING_SNAKE_CASE__ : List[Any] = [0, 3_5378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) ) @slow def __lowercase( self : List[str] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) SCREAMING_SNAKE_CASE__ : List[str] = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608, 959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) ) @slow def __lowercase( self : str )-> Dict: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : int = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 
136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='xlm-roberta-base' , 
revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
636
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1.

    >>> nand_gate(0, 0)
    1
    >>> nand_gate(1, 1)
    0
    """
    # NAND is true unless every input is 1, i.e. whenever at least one 0 is present.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exercise all four input combinations of the gate."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
636
1
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self : Tuple , a_ : Optional[Any] , a_ : List[Any]=13 , a_ : Tuple=7 , a_ : Dict=True , a_ : List[str]=True , a_ : str=True , a_ : str=True , a_ : Any=True , a_ : str=False , a_ : int=False , a_ : Union[str, Any]=False , a_ : Tuple=2 , a_ : int=99 , a_ : Optional[Any]=0 , a_ : int=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : Any=0.1 , a_ : Union[str, Any]=0.1 , a_ : str=512 , a_ : List[str]=2 , a_ : Tuple=0.02 , a_ : Dict=2 , a_ : Dict=4 , a_ : Union[str, Any]="last" , a_ : str=True , a_ : Optional[int]=None , a_ : Dict=0 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : Tuple = seq_length SCREAMING_SNAKE_CASE__ : Any = is_training SCREAMING_SNAKE_CASE__ : Optional[Any] = use_input_lengths SCREAMING_SNAKE_CASE__ : List[Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels SCREAMING_SNAKE_CASE__ : Dict = gelu_activation SCREAMING_SNAKE_CASE__ : Any = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ : Tuple = causal SCREAMING_SNAKE_CASE__ : Optional[Any] = asm SCREAMING_SNAKE_CASE__ : Optional[Any] = n_langs SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] = n_special 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : str = num_labels SCREAMING_SNAKE_CASE__ : str = num_choices SCREAMING_SNAKE_CASE__ : int = summary_type SCREAMING_SNAKE_CASE__ : Dict = use_proj SCREAMING_SNAKE_CASE__ : Any = scope SCREAMING_SNAKE_CASE__ : Dict = bos_token_id def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Optional[int] = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ : Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ : List[str] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Any = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, 
is_impossible_labels, choice_labels, input_mask, ) def __lowercase( self : Dict )-> Optional[Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __lowercase( self : Optional[int] , a_ : List[Any] , a_ : int , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] , a_ : str , a_ : List[str] , a_ : List[Any] , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = XLMModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(a_ , lengths=a_ , langs=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , langs=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : Union[str, Any] , a_ : List[Any] , a_ : Optional[int] , a_ : int , a_ : str , a_ : int , a_ : Optional[Any] , a_ : Optional[int] , a_ : Tuple , a_ : List[str] , )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = XLMWithLMHeadModel(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : Any , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : 
Optional[int] , a_ : Tuple , a_ : List[Any] , a_ : Optional[int] , a_ : int , a_ : Union[str, Any] , )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = XLMForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , start_positions=a_ , end_positions=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase( self : str , a_ : Optional[Any] , a_ : str , a_ : str , a_ : List[str] , a_ : Union[str, Any] , a_ : List[Any] , a_ : Optional[Any] , a_ : Optional[Any] , a_ : List[Any] , )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = XLMForQuestionAnswering(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(a_ ) SCREAMING_SNAKE_CASE__ : int = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) SCREAMING_SNAKE_CASE__ : Any = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) ((SCREAMING_SNAKE_CASE__) , ) : Dict = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ : Dict = model(a_ , start_positions=a_ , end_positions=a_ ) ((SCREAMING_SNAKE_CASE__) , ) : Dict = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowercase( self : Tuple , a_ : str , a_ : Optional[Any] , a_ : Dict , a_ : List[Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Any , a_ : List[str] , )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = XLMForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : int = model(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : Any , a_ : Any , a_ : Optional[Any] , a_ : Dict , a_ : int , a_ : str , a_ : Tuple , a_ : int , a_ : Optional[int] , a_ : int , )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = XLMForTokenClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase( self : List[Any] , a_ : str , a_ : List[Any] , a_ : str , a_ : int , a_ : Any , a_ : Dict , a_ : int , a_ : List[Any] , a_ : Optional[Any] , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_choices SCREAMING_SNAKE_CASE__ : Optional[int] = XLMForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : int = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) 
) def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase_ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase_ = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def __lowercase( self : Any , a_ : Any , a_ : Dict , a_ : List[Any] , a_ : List[str] , a_ : str )-> Tuple: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowercase( self : Optional[int] , a_ : Dict , a_ : List[str] , a_ : Dict=False )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = XLMModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self , config_class=a_ , emb_dim=37 ) def __lowercase( self : Union[str, Any] )-> Dict: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : str )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*a_ ) def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*a_ ) def __lowercase( self : Tuple )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*a_ ) def __lowercase( self : Tuple )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*a_ ) def __lowercase( self : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlm_sequence_classif(*a_ ) def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*a_ ) def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*a_ ) def __lowercase( self : Optional[Any] , a_ : Any , a_ : Any , a_ : str , a_ : Tuple , a_ : str , a_ : Optional[Any]=False , a_ : Dict=1 )-> List[Any]: """simple docstring""" self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_attentions in attentions] , [True] * len(a_ ) ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(a_ ): # adds PAD dummy token SCREAMING_SNAKE_CASE__ : Dict = min_length + idx + 1 SCREAMING_SNAKE_CASE__ : str = min_length + idx + 1 SCREAMING_SNAKE_CASE__ : Tuple = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(a_ ) ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Dict , a_ : Union[str, Any] , a_ : List[Any] , a_ : Any , a_ : Dict=False , a_ : List[str]=1 )-> List[str]: """simple docstring""" self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_hidden_states in hidden_states] , [True] * len(a_ ) , ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(a_ ): # adds PAD dummy token SCREAMING_SNAKE_CASE__ : List[str] = min_length + idx + 1 SCREAMING_SNAKE_CASE__ : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for 
layer_hidden_states in iter_hidden_states] , [expected_shape] * len(a_ ) , ) pass @slow def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = XLMModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class snake_case ( unittest.TestCase ): @slow def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=a_ ) # the president SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(a_ , do_sample=a_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , a_ )
636
from math import factorial, radians def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = -1 for _ in range(lowercase__ ): result += (b * (angle_in_radians**a)) / factorial(lowercase__ ) SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowercase__ , lowercase__ ) if __name__ == "__main__": __import__("doctest").testmod()
636
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ['prompt'] lowercase_ = ['prompt'] lowercase_ = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowercase_ = False @property def __lowercase( self : int )-> Dict: """simple docstring""" return 32 @property def __lowercase( self : List[str] )-> int: """simple docstring""" return 32 @property def __lowercase( self : Dict )-> Dict: """simple docstring""" return self.time_input_dim * 4 @property def __lowercase( self : List[str] )-> Dict: """simple docstring""" return 8 @property def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __lowercase( self : str )-> Dict: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(a_ ) @property def __lowercase( self : List[Any] )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': 
self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } SCREAMING_SNAKE_CASE__ : List[Any] = PriorTransformer(**a_ ) return model @property def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } SCREAMING_SNAKE_CASE__ : Optional[Any] = ShapERenderer(**a_ ) return model def __lowercase( self : List[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.dummy_prior SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_text_encoder SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_tokenizer SCREAMING_SNAKE_CASE__ : Any = self.dummy_renderer SCREAMING_SNAKE_CASE__ : Tuple = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=a_ , clip_sample=a_ , clip_sample_range=1.0 , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowercase( self : Optional[Any] , a_ : Union[str, Any] , a_ : List[Any]=0 )-> List[str]: """simple docstring""" if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs 
def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 'cpu' SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Any = self.pipeline_class(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs(a_ ) ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Tuple = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowercase( self : Optional[Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = torch_device == 'cpu' SCREAMING_SNAKE_CASE__ : Optional[int] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=a_ , relax_max_difference=a_ , ) def __lowercase( self : Dict )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[Any] = self.pipeline_class(**a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : str = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) for key in inputs.keys(): if key in self.batch_params: SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size * [inputs[key]] SCREAMING_SNAKE_CASE__ : List[str] = pipe(**a_ , num_images_per_prompt=a_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case 
( unittest.TestCase ): def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) SCREAMING_SNAKE_CASE__ : Dict = ShapEPipeline.from_pretrained('openai/shap-e' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=a_ ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : int = pipe( 'a shark' , generator=a_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(a_ , a_ )
636
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, by trial division over odd divisors.

    Bug fixed: the original row named this function ``_a`` although the sibling
    function's body calls ``is_prime``; the name the body demands is restored.

    :param number: non-negative integer to test (asserted)
    :return: True when ``number`` is prime, False otherwise
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # Only odd candidates up to sqrt(number) need to be checked.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the closest prime to ``factor * value``, excluding that value itself.

    Searches upward by default, or downward when ``desc=True`` is passed.  When
    the starting value is already prime, the search restarts from the next
    integer so the result is always a *different* prime.

    Bug fixed: the original row declared duplicate ``lowercase__`` parameters
    (a SyntaxError) and referenced ``first_value_val``/``next_prime`` names the
    obfuscation had destroyed; working names are restored, logic unchanged.

    :param value: starting integer
    :param factor: multiplier applied to ``value`` before the search
    :param kwargs: ``desc=True`` searches toward smaller numbers
    :return: the prime found
    """
    value = factor * value
    first_value_val = value  # remember the start so a prime start is skipped
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        # start was already prime: look for the next distinct prime instead
        return next_prime(value + 1, **kwargs)
    return value
636
1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , **a_ : Any , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a_ , default_to_square=a_ ) SCREAMING_SNAKE_CASE__ : int = crop_size if crop_size is not None else {'height': 256, 'width': 256} SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(a_ , param_name='crop_size' ) SCREAMING_SNAKE_CASE__ : Any = do_resize SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : int = resample SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale SCREAMING_SNAKE_CASE__ : List[Any] = rescale_factor SCREAMING_SNAKE_CASE__ : Tuple = do_center_crop SCREAMING_SNAKE_CASE__ : Dict = crop_size SCREAMING_SNAKE_CASE__ : str = do_flip_channel_order def __lowercase( self : str , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PIL.Image.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[str] , )-> np.ndarray: 
"""simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(a_ , default_to_square=a_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(a_ , size=size['shortest_edge'] , default_to_square=a_ ) return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ ) def __lowercase( self : int , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , )-> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(a_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(a_ , size=(size['height'], size['width']) , data_format=a_ , **a_ ) def __lowercase( self : Optional[int] , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , )-> Tuple: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : str , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None )-> np.ndarray: """simple docstring""" return flip_channel_order(a_ , data_format=a_ ) def __lowercase( self : List[Any] , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Dict[str, int] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : Union[str, Any] , )-> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else 
self.do_rescale SCREAMING_SNAKE_CASE__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE__ : Tuple = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(a_ , default_to_square=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(a_ , param_name='crop_size' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Tuple = [to_numpy_array(a_ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE__ : Tuple = [self.center_crop(image=a_ , size=a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : List[Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: SCREAMING_SNAKE_CASE__ : str = [self.flip_channel_order(image=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ ) def __lowercase( self : List[Any] , a_ : Optional[Any] , a_ : List[Tuple] = None )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(a_ ) != len(a_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(a_ ): SCREAMING_SNAKE_CASE__ : Optional[int] = target_sizes.numpy() SCREAMING_SNAKE_CASE__ : Dict = [] for idx in range(len(a_ ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=a_ ) SCREAMING_SNAKE_CASE__ : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(a_ ) else: SCREAMING_SNAKE_CASE__ : List[str] = logits.argmax(dim=1 ) SCREAMING_SNAKE_CASE__ : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
636
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : 
Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = scope SCREAMING_SNAKE_CASE__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2 def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : 
List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = model(a_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, 
inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" pass def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return 
SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), *get_values(a_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): SCREAMING_SNAKE_CASE__ : int = problem_type['title'] SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE__ : str = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that 
is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : int )-> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
636
1
from math import sqrt def _a ( lowercase__ : int ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and ( number >= 0 ), "'number' must been an int and positive" SCREAMING_SNAKE_CASE__ : Optional[Any] = True # 0 and 1 are none primes. if number <= 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = False for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: SCREAMING_SNAKE_CASE__ : Any = False break # precondition assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool" return status def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N SCREAMING_SNAKE_CASE__ : Tuple = list(range(2 , n + 1 ) ) SCREAMING_SNAKE_CASE__ : str = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowercase__ ) ): for j in range(i + 1 , len(lowercase__ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): SCREAMING_SNAKE_CASE__ : Tuple = 0 # filters actual prime numbers. 
SCREAMING_SNAKE_CASE__ : List[str] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _a ( lowercase__ : Union[str, Any] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2" SCREAMING_SNAKE_CASE__ : int = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowercase__ ): ans.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _a ( lowercase__ : Optional[Any] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must been an int and >= 0" SCREAMING_SNAKE_CASE__ : Any = [] # this list will be returns of the function. # potential prime number factors. SCREAMING_SNAKE_CASE__ : Tuple = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = number if number == 0 or number == 1: ans.append(lowercase__ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowercase__ ): while quotient != 1: if is_prime(lowercase__ ) and (quotient % factor == 0): ans.append(lowercase__ ) quotient /= factor else: factor += 1 else: ans.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _a ( lowercase__ : int ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" SCREAMING_SNAKE_CASE__ : str = 0 # prime factorization of 'number' SCREAMING_SNAKE_CASE__ : Optional[Any] = prime_factorization(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = max(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int" return ans def _a ( lowercase__ : Tuple ): '''simple docstring''' assert isinstance(lowercase__ , 
lowercase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" SCREAMING_SNAKE_CASE__ : Any = 0 # prime factorization of 'number' SCREAMING_SNAKE_CASE__ : List[str] = prime_factorization(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = min(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int" return ans def _a ( lowercase__ : Tuple ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool" return number % 2 == 0 def _a ( lowercase__ : Any ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool" return number % 2 != 0 def _a ( lowercase__ : int ): '''simple docstring''' assert ( isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ ) ), "'number' must been an int, even and > 2" SCREAMING_SNAKE_CASE__ : str = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_prime_numbers(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = len(lowercase__ ) # run variable for while-loops. SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : int = None # exit variable. for break up the loops SCREAMING_SNAKE_CASE__ : List[str] = True while i < len_pn and loop: SCREAMING_SNAKE_CASE__ : List[Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: SCREAMING_SNAKE_CASE__ : Optional[Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (len(lowercase__ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def _a ( lowercase__ : List[str] , lowercase__ : Dict ): '''simple docstring''' assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." SCREAMING_SNAKE_CASE__ : Dict = 0 while numbera != 0: SCREAMING_SNAKE_CASE__ : List[Any] = numbera % numbera SCREAMING_SNAKE_CASE__ : Dict = numbera SCREAMING_SNAKE_CASE__ : Any = rest # precondition assert isinstance(lowercase__ , lowercase__ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _a ( lowercase__ : str , lowercase__ : Tuple ): '''simple docstring''' assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." SCREAMING_SNAKE_CASE__ : int = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' SCREAMING_SNAKE_CASE__ : Optional[Any] = prime_factorization(lowercase__ ) SCREAMING_SNAKE_CASE__ : int = prime_factorization(lowercase__ ) elif numbera == 1 or numbera == 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : Any = max(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: SCREAMING_SNAKE_CASE__ : Dict = prime_fac_a.count(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = prime_fac_a.count(lowercase__ ) for _ in range(max(lowercase__ , lowercase__ ) ): ans *= n else: SCREAMING_SNAKE_CASE__ : List[str] = prime_fac_a.count(lowercase__ ) for _ in range(lowercase__ ): ans *= n 
done.append(lowercase__ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: SCREAMING_SNAKE_CASE__ : str = prime_fac_a.count(lowercase__ ) for _ in range(lowercase__ ): ans *= n done.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _a ( lowercase__ : Dict ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int" SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowercase__ ): ans += 1 # precondition assert isinstance(lowercase__ , lowercase__ ) and is_prime( lowercase__ ), "'ans' must been a prime number and from type int" return ans def _a ( lowercase__ : Tuple , lowercase__ : Optional[Any] ): '''simple docstring''' assert ( is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" SCREAMING_SNAKE_CASE__ : int = p_number_a + 1 # jump to the next number SCREAMING_SNAKE_CASE__ : List[Any] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowercase__ ): number += 1 while number < p_number_a: ans.append(lowercase__ ) number += 1 # fetch the next prime number. while not is_prime(lowercase__ ): number += 1 # precondition assert ( isinstance(lowercase__ , lowercase__ ) and ans[0] != p_number_a and ans[len(lowercase__ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def _a ( lowercase__ : Optional[int] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must been int and >= 1" SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowercase__ ) # precondition assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function getDivisiors(...)" return ans def _a ( lowercase__ : Tuple ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and ( number > 1 ), "'number' must been an int and >= 1" SCREAMING_SNAKE_CASE__ : str = get_divisors(lowercase__ ) # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (divisors[0] == 1) and (divisors[len(lowercase__ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ): '''simple docstring''' assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. SCREAMING_SNAKE_CASE__ : Tuple = gcd(abs(lowercase__ ) , abs(lowercase__ ) ) # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _a ( lowercase__ : Optional[int] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0" SCREAMING_SNAKE_CASE__ : Optional[int] = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def _a ( lowercase__ : Optional[int] ): '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0" SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Dict = 1 SCREAMING_SNAKE_CASE__ : List[Any] = 1 # this will be return for _ in range(n - 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = ans ans += fiba SCREAMING_SNAKE_CASE__ : Optional[int] = tmp return ans
636
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Dict = batch_size SCREAMING_SNAKE_CASE__ : Dict = seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = num_labels SCREAMING_SNAKE_CASE__ : Dict = num_choices SCREAMING_SNAKE_CASE__ : str = scope def __lowercase( self : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase( self : Dict )-> Tuple: """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , 
initializer_range=self.initializer_range , ) def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ ) model.to(a_ ) model.eval() # create attention mask SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 # first forward pass SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens # append to next input_ids and attn_mask SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Dict = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , ) # get two different outputs SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state'] SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval() SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ ) # first forward pass SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) 
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state'] SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[ 'last_hidden_state' ] # select random slice SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ ) model.to(a_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.num_labels SCREAMING_SNAKE_CASE__ : str = 
BioGptForTokenClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase( self : Any )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowercase_ = (BioGptForCausalLM,) if is_torch_available() else () lowercase_ = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def __lowercase( self : Tuple )-> int: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : Union[str, Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] 
= self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ : List[str] = type self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : int )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ ) def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*a_ ) def __lowercase( self : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*a_ ) @slow def __lowercase( self : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : List[str] = 'left' # Define PAD Token = EOS Token = 50256 SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id # use different length sentences to test batching SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' 
, padding=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = model.generate( input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(a_ , a_ ) self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] = 3 SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : int = 
BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : str = 3 SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification' SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids'] SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class snake_case ( unittest.TestCase ): @slow def __lowercase( self : Union[str, Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0] SCREAMING_SNAKE_CASE__ : List[str] = 4_2384 SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , a_ ) SCREAMING_SNAKE_CASE__ : int = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) ) @slow def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) SCREAMING_SNAKE_CASE__ : Dict = 
BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( **a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , ) SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(a_ , a_ )
636
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 SCREAMING_SNAKE_CASE__ : Optional[int] = sys.version_info >= (3, 10) def _a ( lowercase__ : Any=None , lowercase__ : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class snake_case : lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 @dataclass class snake_case : lowercase_ = 42 lowercase_ = field(default='toto' , metadata={'help': 'help message'} ) @dataclass class snake_case : lowercase_ = False lowercase_ = True lowercase_ = None class snake_case ( UpperCamelCase_ ): lowercase_ = 'titi' lowercase_ = 'toto' class snake_case ( UpperCamelCase_ ): lowercase_ = 'titi' lowercase_ = 'toto' lowercase_ = 42 @dataclass class snake_case : lowercase_ = "toto" def __lowercase( self : List[str] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = BasicEnum(self.foo ) @dataclass class snake_case : lowercase_ = "toto" def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = MixedTypeEnum(self.foo ) @dataclass class snake_case : lowercase_ = None lowercase_ = field(default=UpperCamelCase_ , metadata={'help': 'help message'} ) lowercase_ = None lowercase_ = list_field(default=[] ) lowercase_ = list_field(default=[] ) @dataclass class snake_case : lowercase_ = list_field(default=[] ) lowercase_ = list_field(default=[1, 2, 3] ) lowercase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) lowercase_ = list_field(default=[0.1, 0.2, 
0.3] ) @dataclass class snake_case : lowercase_ = field() lowercase_ = field() lowercase_ = field() def __lowercase( self : Optional[int] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = BasicEnum(self.required_enum ) @dataclass class snake_case : lowercase_ = 42 lowercase_ = field() lowercase_ = None lowercase_ = field(default='toto' , metadata={'help': 'help message'} ) lowercase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class snake_case : lowercase_ = False lowercase_ = True lowercase_ = None @dataclass class snake_case : lowercase_ = None lowercase_ = field(default=UpperCamelCase_ , metadata={'help': 'help message'} ) lowercase_ = None lowercase_ = list_field(default=[] ) lowercase_ = list_field(default=[] ) class snake_case ( unittest.TestCase ): def __lowercase( self : int , a_ : argparse.ArgumentParser , a_ : argparse.ArgumentParser )-> Tuple: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): SCREAMING_SNAKE_CASE__ : List[str] = {k: v for k, v in vars(a_ ).items() if k != 'container'} SCREAMING_SNAKE_CASE__ : Dict = {k: v for k, v in vars(a_ ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , a_ ) and yy.get('choices' , a_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](a_ ) , yy['type'](a_ ) ) del xx["type"], yy["type"] self.assertEqual(a_ , a_ ) def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser() expected.add_argument('--foo' , type=a_ , required=a_ ) expected.add_argument('--bar' , type=a_ , required=a_ ) expected.add_argument('--baz' , type=a_ , required=a_ ) expected.add_argument('--flag' , type=a_ , default=a_ , const=a_ , nargs='?' 
) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((SCREAMING_SNAKE_CASE__) , ) : Tuple = parser.parse_args_into_dataclasses(a_ , look_for_args_file=a_ ) self.assertFalse(example.flag ) def __lowercase( self : Dict )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=a_ ) expected.add_argument('--baz' , default='toto' , type=a_ , help='help message' ) self.argparsersEqual(a_ , a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() expected.add_argument('--foo' , type=a_ , default=a_ , const=a_ , nargs='?' ) expected.add_argument('--baz' , type=a_ , default=a_ , const=a_ , nargs='?' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=a_ , dest='baz' ) expected.add_argument('--opt' , type=a_ , default=a_ ) SCREAMING_SNAKE_CASE__ : str = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(a_ ) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser(a_ ) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args([] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) 
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) def __lowercase( self : Union[str, Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __lowercase( self : Union[str, Any] )-> Union[str, Any]: """simple docstring""" @dataclass class snake_case : lowercase_ = "toto" SCREAMING_SNAKE_CASE__ : Tuple = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args(['--foo', '42'] ) 
self.assertEqual(args.foo , 42 ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=a_ ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=a_ ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=a_ ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=a_ ) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args([] ) self.assertEqual( a_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(a_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser() expected.add_argument('--foo' , default=a_ , type=a_ ) expected.add_argument('--bar' , default=a_ , type=a_ , help='help message' ) expected.add_argument('--baz' , default=a_ , type=a_ ) expected.add_argument('--ces' , nargs='+' , default=[] , type=a_ ) expected.add_argument('--des' , nargs='+' , default=[] , type=a_ ) SCREAMING_SNAKE_CASE__ : int = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(a_ ) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE__ : Dict = HfArgumentParser(a_ ) self.argparsersEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args([] ) self.assertEqual(a_ , Namespace(foo=a_ , bar=a_ , baz=a_ , ces=[] , des=[] ) ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(a_ 
, Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=a_ , required=a_ ) expected.add_argument('--required_str' , type=a_ , required=a_ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=a_ , ) self.argparsersEqual(a_ , a_ ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() expected.add_argument('--foo' , type=a_ , required=a_ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=a_ , ) expected.add_argument('--opt' , type=a_ , default=a_ ) expected.add_argument('--baz' , default='toto' , type=a_ , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=a_ ) self.argparsersEqual(a_ , a_ ) def __lowercase( self : Tuple )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_dict(a_ )[0] SCREAMING_SNAKE_CASE__ : Dict = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(a_ , parser.parse_dict , a_ , allow_extra_keys=a_ ) def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = HfArgumentParser(a_ ) 
SCREAMING_SNAKE_CASE__ : str = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(a_ , 'temp_json' ) os.mkdir(a_ ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def __lowercase( self : List[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = HfArgumentParser(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Any = os.path.join(a_ , 'temp_yaml' ) os.mkdir(a_ ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Any = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] SCREAMING_SNAKE_CASE__ : List[str] = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = HfArgumentParser(a_ ) self.assertIsNotNone(a_ )
636
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random() def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ): '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = parent SCREAMING_SNAKE_CASE__ : List[Any] = batch_size SCREAMING_SNAKE_CASE__ : str = min_seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ : int = feature_size SCREAMING_SNAKE_CASE__ : str = padding_value SCREAMING_SNAKE_CASE__ : Any = sampling_rate SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize SCREAMING_SNAKE_CASE__ : int = num_mel_bins SCREAMING_SNAKE_CASE__ : int = hop_length SCREAMING_SNAKE_CASE__ : str = win_length SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function SCREAMING_SNAKE_CASE__ : List[str] 
= fmin SCREAMING_SNAKE_CASE__ : Dict = fmax SCREAMING_SNAKE_CASE__ : int = mel_floor SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask def __lowercase( self : Dict )-> Dict: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]: """simple docstring""" def _flatten(a_ : int ): return list(itertools.chain(*a_ ) ) if equal_length: SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]: """simple docstring""" if equal_length: SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Tuple = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = SpeechTaFeatureExtractor def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 
SpeechTaFeatureExtractionTester(self ) def __lowercase( self : Any , a_ : Optional[int] )-> List[str]: """simple docstring""" self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) ) def __lowercase( self : Tuple )-> Dict: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] 
) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 ) SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None] for max_length, padding in zip(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __lowercase( self : int )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __lowercase( self : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] 
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract( a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : str = feat_extract( a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __lowercase( self : Any )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ : List[str] = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(a_ , a_ ): self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : Dict )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: 
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __lowercase( self : Tuple )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack! 
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ ) def __lowercase( self : str )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE__ : str = min(a_ ) SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE__ : int = feat_extract.pad( a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' ) self.assertIn('attention_mask' , a_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __lowercase( self : Optional[int] , a_ : List[str] )-> Any: """simple docstring""" from datasets import load_dataset SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def __lowercase( self : List[str] )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor( [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 
1.6_1_7_4e-0_3, 3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3, 2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4, 4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3, 7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4, 4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] ) # fmt: on SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 9_3680) ) self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) ) def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
636
1
def _a ( lowercase__ : int ): '''simple docstring''' if number > 0: raise ValueError('input must be a negative integer' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(bin(lowercase__ )[3:] ) SCREAMING_SNAKE_CASE__ : str = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:] SCREAMING_SNAKE_CASE__ : Optional[int] = ( ( '1' + '0' * (binary_number_length - len(lowercase__ )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
636
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read *file_path* as raw bytes and return its contents as a '0'/'1' bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # fixed-width 8-bit chunks keep byte boundaries recoverable
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress *data_bits* with the Lempel-Ziv-Welch scheme and return the decoded bits.

    The lexicon starts with the two single-bit codes and grows one entry per
    emitted match; whenever the number of codes crosses a power of two, every
    existing key is widened by a leading '0' so code lengths stay uniform.
    """
    # Fix: the obfuscated original assigned plain names instead of updating
    # `lexicon`, which destroyed the dictionary-growth steps below.
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)  # next code value to hand out (starts at 2)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # code width grew by one bit: prefix every existing key with '0'
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the '0'/'1' string *to_write* to *file_path* as bytes.

    The last byte is padded with a '1' followed by zeros (or a full
    '10000000' marker byte is appended) so the original bit length is
    recoverable.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            # Fix: iterate the full array; slicing [:-1] silently dropped the
            # final (padded/marker) byte and truncated the output file.
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix a compressed file carries and return the payload bits."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    # drop the run of leading zeros, then the same count plus the '1' delimiter
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read *source_path*, LZW-decompress it, and write the result to *destination_path*.

    NOTE(review): the name `compress` is kept for call-site compatibility,
    but the pipeline actually performs decompression.
    """
    # Fix: these helpers were all renamed to the same identifier by the
    # obfuscation, so every call below raised NameError; names restored.
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
636
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") SCREAMING_SNAKE_CASE__ : Tuple = {"target_lang": "fi", "source_lang": "en"} SCREAMING_SNAKE_CASE__ : List[str] = ">>zh<<" SCREAMING_SNAKE_CASE__ : List[str] = "Helsinki-NLP/" if is_torch_available(): SCREAMING_SNAKE_CASE__ : Dict = "pt" elif is_tf_available(): SCREAMING_SNAKE_CASE__ : List[Any] = "tf" else: SCREAMING_SNAKE_CASE__ : List[Any] = "jax" @require_sentencepiece class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = MarianTokenizer lowercase_ = False lowercase_ = True def __lowercase( self : int )-> List[str]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : int = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] SCREAMING_SNAKE_CASE__ : Dict = dict(zip(a_ , range(len(a_ ) ) ) ) SCREAMING_SNAKE_CASE__ : str = Path(self.tmpdirname ) save_json(a_ , save_dir / VOCAB_FILES_NAMES['vocab'] ) save_json(a_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(a_ , save_dir / VOCAB_FILES_NAMES['source_spm'] ) copyfile(a_ , save_dir / VOCAB_FILES_NAMES['target_spm'] ) SCREAMING_SNAKE_CASE__ : List[str] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : int , **a_ : Union[str, Any] )-> MarianTokenizer: """simple docstring""" return MarianTokenizer.from_pretrained(self.tmpdirname , **a_ ) def 
__lowercase( self : int , a_ : Union[str, Any] )-> int: """simple docstring""" return ( "This is a test", "This is a test", ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = '</s>' SCREAMING_SNAKE_CASE__ : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '</s>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(a_ ) , 9 ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __lowercase( self : Any )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = en_de_tokenizer(['I am a small frog'] , return_tensors=a_ ) self.assertIsInstance(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(a_ , batch.input_ids[0] ) SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = [x.name for x in Path(a_ ).glob('*' )] self.assertIn('source.spm' , a_ ) MarianTokenizer.from_pretrained(a_ ) def __lowercase( self : Dict )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = tok( ['I am a small frog' * 1000, 'I am a small frog'] , padding=a_ , truncation=a_ , return_tensors=a_ ) self.assertIsInstance(a_ , a_ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def __lowercase( self : Any )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ 
: int = tok(['I am a tiny frog', 'I am a small frog'] , padding=a_ , return_tensors=a_ ) self.assertIsInstance(a_ , a_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __lowercase( self : Optional[int] )-> Optional[int]: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Tuple = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 
5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , ) def __lowercase( self : Tuple )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' ) SCREAMING_SNAKE_CASE__ : Dict = 'Tämä on testi' SCREAMING_SNAKE_CASE__ : List[str] = 'This is a test' SCREAMING_SNAKE_CASE__ : Any = [76, 7, 2047, 2] SCREAMING_SNAKE_CASE__ : Optional[int] = [69, 12, 11, 940, 2] SCREAMING_SNAKE_CASE__ : Dict = 
tokenizer(a_ ).input_ids self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(text_target=a_ ).input_ids self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(a_ , skip_special_tokens=a_ ) self.assertEqual(a_ , a_ )
636
def is_balanced(s: str) -> bool:
    """Return True if every bracket in *s* is closed in the correct order.

    Recognizes (), [] and {}; all other characters are ignored.

    :param s: arbitrary string possibly containing brackets
    """
    # Fix: both functions in this module had been renamed `_a`, so the calls
    # to `is_balanced` and `main` below raised NameError; names restored.
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(s) == 0
            or (len(stack) == 0 or open_to_closed[stack.pop()] != s[i])
        ):
            # closing bracket with nothing open, or mismatched pair
            return False
    # balanced only if every opener was consumed
    return len(stack) == 0


def main() -> None:
    """Prompt for a bracket sequence and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
636
1
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)


class snake_case(DPTImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Behaves exactly like ``DPTImageProcessor`` but emits a deprecation
    warning on construction.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Fix: the original signature `*a_, **a_` reused one parameter name
        # (a SyntaxError), and the base class / warning category identifiers
        # were undefined. Base restored to the imported DPTImageProcessor,
        # which matches the message below; FutureWarning is the conventional
        # category for planned removals.
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
636
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : int )-> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : Optional[int] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = '</s>' SCREAMING_SNAKE_CASE__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Dict )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(a_ ) , 1103 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" 
self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : Any )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.' 
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example'] SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. 
@slow def __lowercase( self : Any )-> str: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PegasusTokenizer lowercase_ = PegasusTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowercase( self : Optional[Any] )-> List[str]: """simple docstring""" return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : Tuple )-> str: """simple docstring""" return ("This is a test", "This is a test") def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0] self.assertListEqual(a_ , a_ ) @require_torch def __lowercase( self : List[str] )-> Union[str, Any]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example'] SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny'] SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer( text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(a_ ) == 2 # input_ids, attention_mask. def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids self.assertListEqual( a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
636
1
from sklearn.metrics import fa_score import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" SCREAMING_SNAKE_CASE__ : Any = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): def __lowercase( self : List[str] )-> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def __lowercase( self : List[Any] , a_ : Dict , a_ : Optional[int] , a_ : int=None , a_ : Optional[Any]=1 , a_ : Union[str, Any]="binary" , a_ : Union[str, Any]=None )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = fa_score( a_ , a_ , labels=a_ , pos_label=a_ , average=a_ , sample_weight=a_ ) return {"f1": float(a_ ) if score.size == 1 else score}
636
def solution(limit: int = 1_00_00_00) -> int:
    """Count the reduced proper fractions with denominator <= *limit*.

    Sieves Euler's totient for every n in [2, limit] (``phi[p] == p - 1``
    identifies primes, whose multiples are then discounted) and returns
    ``sum(phi(n) for n in 2..limit)`` — Project Euler problem 72.

    :param limit: largest denominator considered (default 1_000_000)
    """
    # Fix: the function was named `_a` while the main guard calls `solution`,
    # and the sieve's inner step had been mangled to `limit` instead of `i`,
    # which skipped every composite update.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: discount the factor i from its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
636
1
def _a ( lowercase__ : str , lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = len(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : Union[str, Any] = True for i in range(lowercase__ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: SCREAMING_SNAKE_CASE__ : Optional[int] = True if a[i].islower(): SCREAMING_SNAKE_CASE__ : List[str] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
636
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', 
f'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), 
('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def _a ( lowercase__ : List[str] , lowercase__ : Dict ): '''simple docstring''' for i in range(config.num_hidden_layers ): SCREAMING_SNAKE_CASE__ : Dict = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE__ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :] def _a ( lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ ) SCREAMING_SNAKE_CASE__ : Any = val @torch.no_grad() def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : str = False if "vqa" in checkpoint_url: 
SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : str = 31_29 SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files' SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json' SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Dict = idalabel SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ ) elif "nlvr" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'} SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()} SCREAMING_SNAKE_CASE__ : Tuple = 3 SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ ) elif "irtr" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ ) elif "mlm_itm" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict'] SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_q_k_v(lowercase__ , lowercase__ ) if mlm_model or irtr_model: SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) # load state dict into HuggingFace model model.eval() if mlm_model: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = 
model.load_state_dict(lowercase__ , strict=lowercase__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowercase__ ) # Define processor SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 ) SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' ) SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ ) # Forward pass on example inputs (image + text) if nlvr_model: SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) SCREAMING_SNAKE_CASE__ : Tuple = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : List[Any] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw ) if mlm_model: SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].' else: SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?' 
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ ) # Verify outputs if mlm_model: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify masked token prediction equals "cats" SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify vqa prediction equals "2" SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, 
help="Path to the output PyTorch model directory." ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
636
1
import warnings

# Deprecation shim: re-export the pipeline under its old import location.
# The re-exported name must be the real diffusers class
# `StableDiffusionImg2ImgPipeline` (as the warning text below states) or the
# import itself raises ImportError.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
636
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case : lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 # [batch_size x 3] lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def __lowercase( self : Dict )-> Tuple: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def __lowercase( self : Dict )-> Union[str, Any]: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def __lowercase( self : Tuple )-> torch.Tensor: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack( [ pixel_indices % self.width, torch.div(a_ , self.width , rounding_mode='trunc' ), ] , axis=1 , ) return coords @property def __lowercase( self : Any )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords() SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor: """simple docstring""" SCREAMING_SNAKE_CASE__ , 
*SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 ) SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution() SCREAMING_SNAKE_CASE__ : str = self.fov() SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1 SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 ) SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 ) SCREAMING_SNAKE_CASE__ : str = ( self.z.view(a_ , 1 , 3 ) + self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1] + self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:] ) SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.stack( [ torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(a_ , *a_ , 2 , 3 ) def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , ) def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) SCREAMING_SNAKE_CASE__ : Tuple = -z * 4 SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ ) origins.append(lowercase__ ) xs.append(lowercase__ ) ys.append(lowercase__ ) zs.append(lowercase__ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
636
1
from __future__ import annotations from collections.abc import Callable SCREAMING_SNAKE_CASE__ : Tuple = list[list[float | int]] def _a ( lowercase__ : Matrix , lowercase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = len(lowercase__ ) SCREAMING_SNAKE_CASE__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(lowercase__ ): for col in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] = matrix[row][col] SCREAMING_SNAKE_CASE__ : List[Any] = vector[row][0] SCREAMING_SNAKE_CASE__ : Tuple = 0 SCREAMING_SNAKE_CASE__ : List[str] = 0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : List[str] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , lowercase__ ): SCREAMING_SNAKE_CASE__ : int = augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : List[str] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , lowercase__ ): for row in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = augmented[row][col] / augmented[col][col] for cola in range(lowercase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ ) ] def _a ( lowercase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = len(lowercase__ ) SCREAMING_SNAKE_CASE__ : Matrix = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )] 
SCREAMING_SNAKE_CASE__ : Matrix = [[0] for _ in range(lowercase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(lowercase__ ): for col in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = (x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Tuple = y_val SCREAMING_SNAKE_CASE__ : Tuple = solve(lowercase__ , lowercase__ ) def interpolated_func(lowercase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(lowercase__ ) ) return interpolated_func def _a ( lowercase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( lowercase__ : Callable[[int], int] = question_function , lowercase__ : int = 10 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] = [func(lowercase__ ) for x_val in range(1 , order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] SCREAMING_SNAKE_CASE__ : int = 0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Optional[int] = 1 while func(lowercase__ ) == poly(lowercase__ ): x_val += 1 ret += poly(lowercase__ ) return ret if __name__ == "__main__": print(F"""{solution() = }""")
636
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the titles of the current top BBC News articles.

    Args:
        bbc_news_api_key: API key for newsapi.org, appended to the query URL.
    """
    # The response JSON carries an "articles" list; each article is a dict.
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
636
1
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class snake_case : def __init__( self : List[Any] , a_ : int , a_ : List[str]=13 , a_ : Tuple=7 , a_ : str=True , a_ : int=True , a_ : Tuple=True , a_ : Any=True , a_ : Any=99 , a_ : List[Any]=32 , a_ : Union[str, Any]=5 , a_ : Dict=4 , a_ : Any=4 , a_ : Union[str, Any]="gelu" , a_ : List[str]=0.0 , a_ : Union[str, Any]=0.1 , a_ : Optional[Any]=True , a_ : Any=512 , a_ : List[str]=16 , a_ : Dict=2 , a_ : Optional[int]=0.02 , a_ : str=3 , a_ : Optional[int]=4 , a_ : str=None , )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : str = batch_size SCREAMING_SNAKE_CASE__ : int = seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = is_training SCREAMING_SNAKE_CASE__ : Tuple = use_input_mask SCREAMING_SNAKE_CASE__ : Optional[int] = use_token_type_ids SCREAMING_SNAKE_CASE__ : Dict = use_labels SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : str = intermediate_multiple_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE__ : int = hidden_dropout SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout SCREAMING_SNAKE_CASE__ : Optional[Any] = weight_tying SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE__ : int = 
type_vocab_size SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = num_labels SCREAMING_SNAKE_CASE__ : Dict = num_choices SCREAMING_SNAKE_CASE__ : Optional[int] = scope def __lowercase( self : Any )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __lowercase( self : List[str] )-> Any: """simple docstring""" return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , ) def __lowercase( self : Tuple )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Any = True return config, input_ids, input_mask, token_labels def __lowercase( self : int , a_ : Optional[int] , a_ : int , a_ : Optional[int] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = GPTNeoXJapaneseModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : 
List[Any] = model(a_ , attention_mask=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : Tuple = GPTNeoXJapaneseModel(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : Optional[int] , a_ : Optional[Any] , a_ : str , a_ : List[Any] , a_ : Any )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = GPTNeoXJapaneseForCausalLM(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase( self : Optional[int] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : List[Any] = GPTNeoXJapaneseForCausalLM(config=a_ ) model.to(a_ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , use_cache=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : str = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , 
attention_mask=a_ , output_hidden_states=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past['hidden_states'][0] SCREAMING_SNAKE_CASE__ : Tuple = model( a_ , attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () lowercase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () lowercase_ = ( {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : Dict )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = GPTNeoXJapaneseModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=a_ , hidden_size=37 ) def __lowercase( self : str )-> Any: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a_ , a_ , a_ ) def __lowercase( self : List[str] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(a_ , a_ , a_ ) def __lowercase( self : Tuple )-> Any: """simple docstring""" # This regression test was failing with PyTorch < 1.3 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE__ : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder(a_ , a_ , a_ ) def __lowercase( self : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(a_ , a_ , a_ ) def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*a_ ) @slow def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'abeja/gpt-neox-japanese-2.7b' SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、'] SCREAMING_SNAKE_CASE__ : Any = [ 'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。', '100年後に必要とされる会社は、「人」が中心の会社です。', 'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。', '国境の長いトンネルを抜けると、そこは雪国だった。', '美味しい日本食といえば、やっぱりお寿司ですよね。', ] SCREAMING_SNAKE_CASE__ : Dict = 
GPTNeoXJapaneseTokenizer.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : int = GPTNeoXJapaneseForCausalLM.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : str = [] for prompt in prompts: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' ).input_ids SCREAMING_SNAKE_CASE__ : Tuple = model.generate(a_ , max_length=50 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) predicted_outputs += generated_string self.assertListEqual(a_ , a_ )
636
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger() @dataclass class snake_case : lowercase_ = 42 lowercase_ = field(default_factory=UpperCamelCase_ ) lowercase_ = field(default_factory=UpperCamelCase_ ) def __lowercase( self : Dict , a_ : Dict , a_ : Tensor , a_ : Tensor )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(a_ ) def __call__( self : Tuple , a_ : Tensor )-> Any: """simple docstring""" for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(a_ ) [x.remove() for x in self.handles] return self @property def __lowercase( self : Tuple )-> int: """simple docstring""" # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class snake_case : lowercase_ = 42 lowercase_ = 42 lowercase_ = 1 lowercase_ = field(default_factory=UpperCamelCase_ ) lowercase_ = field(default_factory=UpperCamelCase_ ) lowercase_ = True def __call__( self : List[Any] , a_ : Tensor )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a_ ).parametrized SCREAMING_SNAKE_CASE__ : Optional[int] = 
Tracker(self.src )(a_ ).parametrized SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) ) SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) ) if len(a_ ) != len(a_ ) and self.raise_if_mismatch: raise Exception( F'''Numbers of operations are different. Source module has {len(a_ )} operations while''' F''' destination module has {len(a_ )}.''' ) for dest_m, src_m in zip(a_ , a_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) class snake_case ( nn.Module ): def __init__( self : List[Any] , a_ : nn.Module )-> Dict: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), F'''Unexpected layer name {k}''' SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a_ ) + 1 feature_blocks.append((F'''res{block_index}''', v) ) SCREAMING_SNAKE_CASE__ : Any = nn.ModuleDict(a_ ) def __lowercase( self : Tuple , a_ : Tensor )-> Dict: """simple docstring""" return get_trunk_forward_outputs( a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , ) class snake_case ( UpperCamelCase_ ): def __lowercase( self : Optional[Any] , a_ : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Union[str, Any] , a_ : str )-> Callable[[], Tuple[nn.Module, Dict]]: """simple docstring""" # default to timm! 
if x not in self: SCREAMING_SNAKE_CASE__ : Any = self.convert_name_to_timm(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) ) else: SCREAMING_SNAKE_CASE__ : List[str] = super().__getitem__(a_ ) return val class snake_case ( UpperCamelCase_ ): def __getitem__( self : Any , a_ : str )-> Callable[[], nn.Module]: """simple docstring""" if "seer" in x and "in1k" not in x: SCREAMING_SNAKE_CASE__ : Any = RegNetModel else: SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification return val def _a ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Tuple[str, str]] ): '''simple docstring''' for from_key, to_key in keys: SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone() print(f'''Copied key={from_key} to={to_key}''' ) return to_state_dict def _a ( lowercase__ : str , lowercase__ : Callable[[], nn.Module] , lowercase__ : Callable[[], nn.Module] , lowercase__ : RegNetConfig , lowercase__ : Path , lowercase__ : bool = True , ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model_func() SCREAMING_SNAKE_CASE__ : int = our_model_func(lowercase__ ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(lowercase__ ) if from_state_dict is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE__ : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] SCREAMING_SNAKE_CASE__ : Optional[Any] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ ) our_model.load_state_dict(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = our_model(lowercase__ , 
output_hidden_states=lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = ( our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state ) SCREAMING_SNAKE_CASE__ : List[Any] = from_model(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = from_output[-1] if type(lowercase__ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE__ : List[Any] = our_outputs.hidden_states[-1] assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24 if 'seer' not in name else 3_84 # we can use the convnext one SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , ) print(f'''Pushed {name}''' ) def _a ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE__ : Tuple = 10_00 SCREAMING_SNAKE_CASE__ : Tuple = (1, num_labels) SCREAMING_SNAKE_CASE__ : str = 'huggingface/label-files' SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = idalabel SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Any = partial(lowercase__ , 
num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = { 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , 
hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ), # finetuned 
on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ), } SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap() SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]: SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' ) SCREAMING_SNAKE_CASE__ : Tuple = model_func() # check if we have a head, if yes add it SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model'] SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk'] model.load_state_dict(lowercase__ ) return model.eval(), model_state_dict["heads"] # pretrained SCREAMING_SNAKE_CASE__ : Any = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE__ : int = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE__ : List[Any] = partial( lowercase__ , 
'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) SCREAMING_SNAKE_CASE__ : Optional[int] = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned SCREAMING_SNAKE_CASE__ : List[Any] = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE__ : Optional[int] = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) SCREAMING_SNAKE_CASE__ : Any = partial( lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , ) return config, 
expected_shape if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
636
1
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    """Convert a benchmark-results JSON file into a collapsible Markdown report.

    The JSON maps benchmark names to ``{metric: {"new": x, "old": y, "diff": z}}``
    dictionaries ("old" and "diff" are optional). One Markdown table is emitted
    per benchmark, all wrapped in a single ``<details>`` block.

    Args:
        input_json_file: path of the JSON results file to read.
        output_md_file: path of the Markdown file to write.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        # Build the three table rows (header, separator, values) in parallel,
        # one column per metric.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric entries are rendered literally as "None".
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.write("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
636
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    r"""
    Wraps an OWL-ViT image processor and a CLIP tokenizer into a single
    processor so that text queries, query images, and target images can be
    prepared with one call.
    """

    # Class-level contract consumed by ProcessorMixin; the mangled original
    # assigned all three to the same name, so only the last one survived.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        At least one of `text`, `query_images`, `images` must be given.
        Nested text lists are padded with " " queries up to the largest number
        of queries in the batch, then stacked along the batch axis.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward all arguments to OwlViTImageProcessor.post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward all arguments to OwlViTImageProcessor.post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward all arguments to OwlViTImageProcessor.post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
636
1
from __future__ import annotations

# Collected solutions; each entry is a deep copy of a completed board.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked.

    Only the row, the column, and the two upward diagonals need checking,
    because queens are placed one row at a time from the top.
    """
    n = len(board)
    # Same row / same column.
    for i in range(n):
        if board[row][i] == 1:
            return False
    for i in range(n):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, n)):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Recursively place queens row by row, recording every full solution."""
    if row >= len(board):
        # Store a deep copy: the board is mutated while backtracking, so
        # appending the live object would leave only all-zero boards behind.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print ``board``, 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
636
class OverFlowError(Exception):
    """Raised when a queue has reached its maximum capacity."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""


class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 = highest).

    Elements are enqueued with an explicit priority and dequeued in
    priority order, FIFO within the same priority level.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level 0, 1, 2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add ``data`` at the given priority (0, 1 or 2).

        Raises:
            OverFlowError: if the target level already holds 100 items.
            ValueError: if ``priority`` is not 0, 1 or 2.
        """
        try:
            if len(self.queues[priority]) >= 100:
                # Use the module's own OverFlowError consistently (the
                # previous code mixed it with the builtin OverflowError).
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Remove and return the oldest element of the highest non-empty level.

        Raises:
            UnderFlowError: if every priority level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Priority queue where the element's own value is its priority
    (smaller value = higher priority)."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        """Append ``data``; raises OverFlowError once 100 items are stored."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element.

        Raises:
            UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demo exercising FixedPriorityQueue, printing state and dequeues."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """Demo exercising ElementPriorityQueue, printing state and dequeues."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
636
1
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Return the raw bytes of the video behind an Instagram/IGTV post URL.

    The downloadgram.net API is queried first to resolve the direct ``src``
    URL of the video, which is then fetched.

    Args:
        url: public Instagram video / IGTV post URL.

    Returns:
        The video file content as bytes.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # First hop: resolve the direct video source via the downloader API.
    video_src = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    # Second hop: download the actual video bytes.
    return requests.get(video_src).content


# Backward-compatible alias for the previous (mangled) name.
_a = download_video


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
636
import functools

from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Decorator for model ``forward``-style methods that triggers the
    attached Accelerate hook's ``pre_forward`` before the call.

    With accelerate >= 0.17.0 the hook must run so that offloaded weights are
    moved back onto the execution device before ``method`` executes; on older
    versions (or when accelerate is missing) the method is returned unwrapped.

    Args:
        method: the bound method to wrap.

    Returns:
        The wrapped method, or ``method`` itself when no wrapping is needed.
    """
    if not is_accelerate_available():
        return method

    # NOTE: the previous code passed the *method* object to version.parse()
    # instead of the computed base-version string, which raised as soon as
    # the decorator ran; parse the version string instead.
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    @functools.wraps(method)  # keep the wrapped method's name/docstring
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper


# Backward-compatible alias for the previous (mangled) public name.
_a = apply_forward_hook
636
1
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs used by the tests and computes the
    expected post-resize output sizes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the expected (height, width) after shortest-edge resizing.

        For batched inputs, each image is resized individually and the
        per-axis maxima are returned (images are padded to the largest).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
636
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module) -> bool:
    """Return True if ``module`` is a ``torch.compile`` OptimizedModule."""
    # torch._dynamo only exists on torch >= 2.0.
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap ``model`` from DDP/DataParallel/DeepSpeed/compile wrappers.

    When ``keep_fp32_wrapper`` is False, also restores the original
    ``forward`` saved under ``_original_forward`` and undoes any
    transformer-engine conversion.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Peel decorator layers until we reach the saved original forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Block until every distributed process reaches this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save ``obj`` to ``f`` once per node (via xm.save on TPU, else rank 0)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables from ``kwargs``."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for a function, class, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge ``source`` into ``destination`` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Return True if ``port`` (default 29500) is already bound on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
636
1
from ....configuration_utils import PretrainedConfig from ....utils import logging SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) # TODO: upload to AWS SCREAMING_SNAKE_CASE__ : Optional[int] = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class snake_case ( UpperCamelCase_ ): lowercase_ = 'retribert' def __init__( self : Any , a_ : Optional[Any]=3_0522 , a_ : Optional[int]=768 , a_ : Union[str, Any]=8 , a_ : Dict=12 , a_ : Optional[Any]=3072 , a_ : Dict="gelu" , a_ : Dict=0.1 , a_ : List[Any]=0.1 , a_ : List[Any]=512 , a_ : List[str]=2 , a_ : Tuple=0.02 , a_ : int=1e-1_2 , a_ : Optional[int]=True , a_ : List[Any]=128 , a_ : List[Any]=0 , **a_ : List[Any] , )-> List[str]: """simple docstring""" super().__init__(pad_token_id=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : int = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = type_vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = share_encoders SCREAMING_SNAKE_CASE__ : Optional[int] = projection_dim
636
from __future__ import annotations def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ): '''simple docstring''' if len(lowercase__ ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(lowercase__ ) or left < -len(lowercase__ ) or right >= len(lowercase__ ) or right < -len(lowercase__ ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid] SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
636
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration for a Trajectory Transformer model.

    GPT-style hyperparameters (``n_layer``/``n_head``/``n_embd``) plus
    trajectory-specific fields: loss weights for actions/rewards/values and
    the dimensions of the (observation, action, reward, value) transition.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
636
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    """Per-process tensor [rank*n + 1, ..., rank*n + n] on the state device."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process deliberately makes a tensor one element longer so the
    # other ranks must be padded up to it.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # Entry point used by TPU spawn; the index argument is unused.
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
636
1
from __future__ import annotations def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ): '''simple docstring''' if len(lowercase__ ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(lowercase__ ) or left < -len(lowercase__ ) or right >= len(lowercase__ ) or right < -len(lowercase__ ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid] SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
636
# NOTE(review): machine-mangled unit tests for the Hugging Face `transformers`
# `Pix2StructImageProcessor`.  The obfuscation pass collapsed each file onto a
# single physical line (so the module no longer parses), renamed every
# parameter of multi-argument signatures to the same name ``a_`` (duplicate
# parameter names are a SyntaxError), renamed locals to
# ``SCREAMING_SNAKE_CASE__`` while the bodies still *read* the original names
# (``size``, ``parent``, ``batch_size``, ...), and gave both test classes the
# same name ``snake_case`` with an undefined base ``UpperCamelCase_``
# (presumably the ImageProcessingSavingTestMixin imported above).  The code is
# preserved byte-for-byte below; restore names from the upstream transformers
# test file before attempting to run it.
# --- imports, torch/vision availability gates, and the tester helper class
#     (builds processor kwargs, a dummy downloaded image, and size/patch
#     configuration for the tests) ---
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE__ : Any = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : Any = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size SCREAMING_SNAKE_CASE__ : List[str] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : List[Any] = size SCREAMING_SNAKE_CASE__ : Tuple = do_normalize SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096] SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16} def __lowercase( self : Optional[Any] )-> str: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowercase( self : Dict )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' ) return raw_image
# --- first (3-channel) test class: property checks, the dummy-image
#     expected-mean integration check, and the start of the PIL-input
#     flattened-patch shape test ---
@unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self ) @property def __lowercase( self : Dict )-> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE__ : List[Any] = 2048 SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __lowercase( self : Any )-> Tuple: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches:
# --- continuation: unbatched/batched shape assertions and the VQA-style
#     test that requires ``header_text`` when ``is_vqa`` is enabled ---
# Test not batched input SCREAMING_SNAKE_CASE__ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : Any )-> Any: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 SCREAMING_SNAKE_CASE__ : int = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(a_ ): SCREAMING_SNAKE_CASE__ : Dict = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello' SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any = image_processor( a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ :
# --- continuation: numpy-array input and torch-tensor input variants of the
#     same flattened-patch shape test (note the assignment statement is split
#     mid-token across this physical line boundary) ---
Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) SCREAMING_SNAKE_CASE__ : str = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : str = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : int = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = image_processor( a_ ,
# --- end of the 3-channel class and start of the second (4-channel / RGBA)
#     test class, which checks the RGB-conversion path: the expected hidden
#     dim uses ``num_channels - 1`` because the alpha channel is dropped ---
return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 ) SCREAMING_SNAKE_CASE__ : Dict = 3 @property def __lowercase( self : Any )-> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" # Initialize image_processor SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched
# --- final batched assertion of the 4-channel test class ---
SCREAMING_SNAKE_CASE__ : Tuple = image_processor( a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
636
1
def binary_multiply(a: int, b: int) -> int:
    """Multiply ``a`` by ``b`` using only addition and bit shifts
    (Russian-peasant / binary multiplication).

    :param a: first factor (may be negative)
    :param b: non-negative second factor; for ``b <= 0`` the loop never
        runs and 0 is returned
    :return: ``a * b``
    """
    res = 0
    while b > 0:
        if b & 1:  # this bit of b contributes one copy of the shifted a
            res += a
        a += a  # a * 2**k for the next bit
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute ``(a * b) % c`` by binary multiplication, reducing modulo
    ``c`` after every addition so intermediate sums stay bounded.

    :param a: first factor
    :param b: non-negative second factor
    :param c: non-zero modulus
    :return: ``(a * b) % c``
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res


# Backward-compatible alias: the machine-mangled original bound *both*
# functions to the single name ``_a``; the second definition shadowed the
# first, so ``_a`` keeps pointing at the modular variant.
_a = binary_mod_multiply
636
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex for Prim's minimum-spanning-tree algorithm.

    Attributes:
        id: vertex identifier, stored as a string.
        key: cheapest known edge weight connecting the vertex to the tree.
        pi: predecessor vertex in the minimum spanning tree.
        neighbors: adjacent ``Vertex`` objects.
        edges: mapping of neighbor id -> edge weight.
    """

    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other) -> bool:
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Record ``vertex`` as adjacent to this vertex."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Store the weight of the edge towards ``vertex``."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    """Add an undirected edge of weight ``edge`` between the vertices with
    1-based positions ``a`` and ``b`` in ``graph``."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear-scan frontier (O(V^2)).

    :param graph: connected graph as a list of ``Vertex`` created with
        0-based ids
    :param root: starting vertex
    :return: MST edges as ``(child, parent)`` pairs of 1-based vertex numbers
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)  # cheapest vertex still outside the tree
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-driven Prim's algorithm; yields the same ``(child, parent)``
    pairs as :func:`prim`.

    The heap is re-heapified after every key decrease because ``heapq``
    offers no decrease-key operation.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)  # restore heap order after the key change

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder kept for the doctest-driven smoke test of the original
    module; the module entry point below runs ``doctest.testmod()``."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
636
1
# NOTE(review): machine-mangled unit tests for the Hugging Face `transformers`
# `TvltFeatureExtractor`.  As elsewhere in this paste, each original file was
# collapsed onto one physical line (unparseable), multi-argument signatures
# were given duplicate parameter names (``lowercase__`` / ``a_`` -- a
# SyntaxError), locals became ``SCREAMING_SNAKE_CASE__`` while bodies still
# read the original names (e.g. ``rng``, ``values``, ``global_rng`` -- note
# ``global_rng`` is now *assigned to* a mangled name but *read* under its
# original name), and classes became ``snake_case`` with the undefined base
# ``UpperCamelCase_`` (presumably SequenceFeatureExtractionTestMixin).
# Restore names from the upstream transformers test file before use.
# --- imports, module rng, the floats_list helper, and the tester class that
#     parameterizes spectrogram length / feature size / sampling rate ---
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE__ : Dict = random.Random() def _a ( lowercase__ : Any , lowercase__ : Tuple=1.0 , lowercase__ : int=None , lowercase__ : int=None ): '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE__ : int = global_rng SCREAMING_SNAKE_CASE__ : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): def __init__( self : Any , a_ : str , a_ : int=7 , a_ : str=400 , a_ : Any=2000 , a_ : List[str]=2048 , a_ : Optional[Any]=128 , a_ : Union[str, Any]=1 , a_ : Dict=512 , a_ : Dict=30 , a_ : Tuple=4_4100 , )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = min_seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length SCREAMING_SNAKE_CASE__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ : Any = spectrogram_length SCREAMING_SNAKE_CASE__ : Dict = feature_size SCREAMING_SNAKE_CASE__ : Tuple = num_audio_channels SCREAMING_SNAKE_CASE__ : Any = hop_length SCREAMING_SNAKE_CASE__ : int = chunk_length SCREAMING_SNAKE_CASE__ : Dict = sampling_rate def __lowercase( self : int )-> int: """simple docstring""" return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels,
# --- tester continuation (prepare_inputs_for_common) and the start of the
#     actual test class: attribute checks and save/load round-trip ---
"hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowercase( self : int , a_ : Any=False , a_ : Union[str, Any]=False )-> str: """simple docstring""" def _flatten(a_ : Optional[Any] ): return list(itertools.chain(*a_ ) ) if equal_length: SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = TvltFeatureExtractor def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = TvltFeatureExtractionTester(self ) def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(a_ , 'spectrogram_length' ) ) self.assertTrue(hasattr(a_ , 'feature_size' ) ) self.assertTrue(hasattr(a_ , 'num_audio_channels' ) ) self.assertTrue(hasattr(a_ , 'hop_length' ) ) self.assertTrue(hasattr(a_ , 'chunk_length' ) ) self.assertTrue(hasattr(a_ , 'sampling_rate' ) ) def __lowercase( self : int )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Any = feat_extract_first.save_pretrained(a_ )[0] check_json_file_has_correct_format(a_ ) SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE__ : str =
# --- continuation: mel-filter comparison after save/load, JSON round-trip,
#     and the audio-value shape test for not-batched / batched input (note
#     the assignment split mid-token at this line boundary) ---
feat_extract_second.to_dict() SCREAMING_SNAKE_CASE__ : Dict = dict_first.pop('mel_filters' ) SCREAMING_SNAKE_CASE__ : Tuple = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def __lowercase( self : List[str] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : str = os.path.join(a_ , 'feat_extract.json' ) feat_extract_first.to_json_file(a_ ) SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class.from_json_file(a_ ) SCREAMING_SNAKE_CASE__ : int = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE__ : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE__ : Any = dict_first.pop('mel_filters' ) SCREAMING_SNAKE_CASE__ : Any = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def __lowercase( self : Union[str, Any] )-> List[Any]: """simple docstring""" # Initialize feature_extractor SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 )
# --- continuation: masked-audio variant, 2-D numpy batching check, the
#     librispeech sample loader, and the start of the integration test ---
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor( a_ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=a_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE__ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowercase( self : Any , a_ : List[str] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ : Optional[int] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def __lowercase( self : Any )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : Any = TvltFeatureExtractor() SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor(a_ , return_tensors='pt' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) SCREAMING_SNAKE_CASE__
# --- final expected-value assertion of the integration test ---
: Dict = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1e-4 ) )
636
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two bit inputs.

    NAND is 1 unless both inputs are 1; implemented by checking whether
    either input is 0.

    :param input_1: first bit (0 or 1)
    :param input_2: second bit (0 or 1)
    :return: 1 if at least one input is 0, otherwise 0
    """
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the 2-input NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
636
1
class Node:
    """A binary-search-tree node used by :func:`tree_sort`."""

    def __init__(self, val) -> None:
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val) -> None:
        """Insert ``val`` into the subtree rooted at this node.

        Values equal to an existing node are ignored, so the resulting
        traversal deduplicates the input.
        """
        if self.val is None:
            # The original code tested truthiness (``if self.val:``), which
            # silently dropped inserts whenever a node held a falsy value
            # such as 0; compare explicitly against None instead.
            self.val = val
        elif val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)
        # val == self.val: already present, nothing to do.


def inorder(root, res) -> None:
    """Append the values of the subtree at ``root`` to ``res`` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by inserting into a binary search tree and reading it back
    in order.

    :param arr: list of mutually comparable values
    :return: a new sorted list with duplicates removed; the input list itself
        is returned unchanged when it is empty
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
636
from math import factorial, radians


def sin(
    angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10
) -> float:
    """Approximate the sine of an angle given in degrees with a Maclaurin
    series: ``sin(x) = x - x^3/3! + x^5/5! - ...``.

    :param angle_in_degrees: angle in degrees (any magnitude or sign)
    :param accuracy: number of series terms added after the initial ``x``
    :param rounded_values_count: decimal places the result is rounded to
    :return: ``sin(angle_in_degrees)`` rounded to ``rounded_values_count``
        decimal places
    """
    # Wrap the angle into [0, 360) so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3  # next odd power of x
    b = -1  # sign of the next term
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # alternate the sign of successive terms
        a += 2  # the series uses odd powers only
    return round(result, rounded_values_count)


# Backward-compatible alias for the machine-generated name this module
# previously exposed.
_a = sin

if __name__ == "__main__":
    __import__("doctest").testmod()
636
1
# NOTE(review): machine-mangled copy of the Hugging Face `datasets`
# audio-classification task template.  The decorator argument and the base
# class were both rewritten to ``UpperCamelCase_`` (undefined here) --
# upstream this is presumably ``@dataclass(frozen=True)`` and ``TaskTemplate``
# -- and all attribute/method names were flattened to ``lowercase_`` /
# ``__lowercase``; verify against the upstream module before running.
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=UpperCamelCase_ )
class snake_case ( UpperCamelCase_ ):
    # Template metadata: task name plus the input ("audio") and label
    # ("labels") schemas, and the default column names.
    lowercase_ = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    lowercase_ = Features({'audio': Audio()} )
    lowercase_ = Features({'labels': ClassLabel} )
    lowercase_ = "audio"
    lowercase_ = "labels"

    def __lowercase( self : Tuple , a_ : Optional[int] )-> List[str]:
        """Return a copy of this template whose label schema is aligned with
        the given dataset features."""
        # NOTE(review): the body reads ``features`` although the parameter was
        # renamed to ``a_`` -- the original signature was presumably
        # ``align_with_features(self, features)``; confirm upstream.
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , a_ ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = copy.deepcopy(self )
        SCREAMING_SNAKE_CASE__ : List[Any] = self.label_schema.copy()
        SCREAMING_SNAKE_CASE__ : Any = features[self.label_column]
        SCREAMING_SNAKE_CASE__ : List[Any] = label_schema
        return task_template

    @property
    def __lowercase( self : Tuple )-> Dict[str, str]:
        """Map dataset column names to the task's canonical column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
636
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using trial division by odd
    numbers up to ``sqrt(number)``.

    :param number: non-negative integer to test
    :return: True when ``number`` is prime
    :raises AssertionError: if ``number`` is not a non-negative int
        (validation deliberately kept as ``assert`` to preserve the
        original exception type)
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the smallest prime strictly greater than ``factor * value``
    (or, with ``desc=True``, search downwards instead).

    :param value: starting number
    :param factor: multiplier applied to ``value`` before searching
    :param kwargs: pass ``desc=True`` to step downwards while searching
    :return: the next prime found
    """
    value = factor * value
    first_value_val = value  # remember the start to avoid returning it

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # If the start itself was already prime, restart just past it so the
    # result is always a *different* prime.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
636
1