Dataset columns:

- code: string, 86 to 54.5k characters
- code_codestyle: int64, 0 to 371
- style_context: string, 87 to 49.2k characters
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 or 1
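The samples below are the `code` field of individual rows. As a minimal sketch of how a dataset with this schema is typically consumed with the `datasets` library (the dataset identifier here is a hypothetical placeholder, not the real name):

# Sketch: loading a code-style dataset with the `datasets` library.
# "user/code-style-pairs" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["label"])       # 0 or 1
print(row["code"][:200])  # first characters of the flattened code sample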
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __a : Dict , __a : Optional[Any]=None , __a : Tuple=None , __a : str=None , __a : Dict="resnet50" , __a : List[str]=3 , __a : Any=32 , __a : Dict=3 , __a : str=True , __a : Tuple=True , ) -> Any: _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = out_indices if out_indices is not None else [4] _UpperCamelCase : Optional[Any] = stage_names _UpperCamelCase : Dict = out_features _UpperCamelCase : Optional[Any] = backbone _UpperCamelCase : Union[str, Any] = batch_size _UpperCamelCase : Union[str, Any] = image_size _UpperCamelCase : Optional[int] = num_channels _UpperCamelCase : str = use_pretrained_backbone _UpperCamelCase : List[Any] = is_training def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Tuple = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self : int , __a : Any , __a : Optional[Any] ) -> List[str]: _UpperCamelCase : Tuple = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): _UpperCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: _UpperCamelCase : Any = self.prepare_config_and_inputs() _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Dict = (TimmBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ :Dict = {"feature-extraction": TimmBackbone} if is_torch_available() else {} SCREAMING_SNAKE_CASE__ :List[str] = False SCREAMING_SNAKE_CASE__ :Optional[Any] = False SCREAMING_SNAKE_CASE__ :List[str] = False SCREAMING_SNAKE_CASE__ :Dict = False def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: _UpperCamelCase : Dict = TimmBackboneModelTester(self ) _UpperCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Union[str, Any] = "resnet18" _UpperCamelCase : Optional[int] = "microsoft/resnet-18" _UpperCamelCase : Optional[int] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) _UpperCamelCase : str = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) _UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) _UpperCamelCase : List[str] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: pass @unittest.skip("Safetensors is not supported by timm." 
) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: _UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[str] = model_class(__a ) _UpperCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Dict = [*signature.parameters.keys()] _UpperCamelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : Optional[int] = True _UpperCamelCase : int = self.has_attentions # no need to test all models as different heads yield the same functionality _UpperCamelCase : str = self.all_model_classes[0] _UpperCamelCase : str = model_class(__a ) model.to(__a ) _UpperCamelCase : int = self._prepare_for_class(__a , __a ) _UpperCamelCase : Optional[Any] = model(**__a ) _UpperCamelCase : Union[str, Any] = outputs[0][-1] # Encoder-/Decoder-only models _UpperCamelCase : Tuple = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _UpperCamelCase : Any = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : Dict = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None _UpperCamelCase : List[str] = copy.deepcopy(__a ) _UpperCamelCase : Dict = None _UpperCamelCase : Dict = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights _UpperCamelCase : Dict = copy.deepcopy(__a ) _UpperCamelCase : int = False _UpperCamelCase : Optional[Any] = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(**__a )
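A minimal usage sketch of the backbone API the test above exercises; the checkpoint name and `out_indices` are taken from the test itself, but the snippet is illustrative rather than part of the test suite.

# Usage sketch (illustrative): load a timm backbone and inspect its feature maps.
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.rand(1, 3, 224, 224)  # dummy RGB image batch
outputs = backbone(pixel_values)
for channels, feature_map in zip(backbone.channels, outputs.feature_maps):
    print(channels, tuple(feature_map.shape))  # one feature map per requested stage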
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
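A short usage sketch of the same pipeline the slow tests above exercise; the model name, image URL, and expected labels come straight from those tests.

# Usage sketch (illustrative): detect objects in a COCO image with DETR.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
for d in detections:
    print(d["label"], round(d["score"], 4), d["box"])  # e.g. cat 0.9988 {'xmin': 13, ...}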
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = None SCREAMING_SNAKE_CASE__ :int = BloomTokenizerFast SCREAMING_SNAKE_CASE__ :Tuple = BloomTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :Dict = False SCREAMING_SNAKE_CASE__ :Optional[int] = "tokenizer_file" SCREAMING_SNAKE_CASE__ :int = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: super().setUp() _UpperCamelCase : int = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self : str , **__a : Tuple ) -> List[str]: kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Any: _UpperCamelCase : int = self.get_rust_tokenizer() _UpperCamelCase : List[Any] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"] _UpperCamelCase : int = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] _UpperCamelCase : List[str] = tokenizer.batch_encode_plus(__a )["input_ids"] self.assertListEqual(__a , __a ) _UpperCamelCase : List[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int]=6 ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input _UpperCamelCase : str = "This is a simple input" _UpperCamelCase : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Tuple = ("This is a simple input", "This is a pair") _UpperCamelCase : List[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests try: tokenizer_r.encode(__a , max_length=__a ) tokenizer_r.encode_plus(__a , max_length=__a ) tokenizer_r.batch_encode_plus(__a , max_length=__a ) tokenizer_r.encode(__a , max_length=__a ) tokenizer_r.batch_encode_plus(__a , max_length=__a ) except ValueError: self.fail("Bloom Tokenizer should be able to deal with padding" ) _UpperCamelCase : Tuple = None # Hotfixing padding = None self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: _UpperCamelCase : Dict = self.get_rust_tokenizer() _UpperCamelCase : Optional[Any] = 
load_dataset("xnli" , "all_languages" , split="test" , streaming=__a ) _UpperCamelCase : List[str] = next(iter(__a ) )["premise"] # pick up one data _UpperCamelCase : List[str] = list(sample_data.values() ) _UpperCamelCase : Any = list(map(tokenizer.encode , __a ) ) _UpperCamelCase : List[Any] = [tokenizer.decode(__a , clean_up_tokenization_spaces=__a ) for x in output_tokens] self.assertListEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase__ = {"UserAgent": UserAgent().random} def lowercase__ ( lowercase_ ) -> dict: """simple docstring""" _UpperCamelCase : str = script.contents[0] _UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : str ) -> Tuple: _UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/''' _UpperCamelCase : Optional[Any] = self.get_json() def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict: _UpperCamelCase : int = requests.get(self.url , headers=__a ).text _UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ) -> str: return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : str ) -> str: return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: return self.user_data["username"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["full_name"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self.user_data["biography"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["business_email"] @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["external_url"] @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool: return self.user_data["is_verified"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: return self.user_data["is_private"] def lowercase__ ( lowercase_ = "github" ) -> None: """simple docstring""" import os if os.environ.get("CI" ): return # test failing on GitHub Actions _UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data ,lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = InstagramUser("github") print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
"""simple docstring""" def lowercase__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Tuple = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCamelCase : List[str] = 6 _UpperCamelCase : Union[str, Any] = 1 _UpperCamelCase : int = 1_901 _UpperCamelCase : Optional[Any] = 0 while year < 2_001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCamelCase : Tuple = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCamelCase : Any = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCamelCase : List[str] = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCamelCase : Any = 1 if year < 2_001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
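A minimal usage sketch for the factory functions above, assuming the `IIRFilter.process(sample)` interface from `audio_filters.iir_filter`; the tone frequencies are illustrative.

# Usage sketch (illustrative): attenuate a 5 kHz tone with a 1 kHz low-pass filter.
from math import sin, tau

filt = make_lowpass(1000, 48000)
samples = [sin(tau * 5000 * n / 48000) for n in range(480)]  # 10 ms of a 5 kHz tone
filtered = [filt.process(sample) for sample in samples]      # sample-by-sample filtering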
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record lowerCamelCase__ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n" lowerCamelCase__ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n" lowerCamelCase__ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 
1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" return float((preds == labels).mean() ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_="binary" ) -> Any: """simple docstring""" _UpperCamelCase : str = simple_accuracy(lowercase_ ,lowercase_ ) _UpperCamelCase : List[Any] = float(fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average=lowercase_ ) ) return { "accuracy": acc, "f1": fa, } def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : Dict = {} for id_pred, label in zip(lowercase_ ,lowercase_ ): _UpperCamelCase : Optional[int] = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' _UpperCamelCase : Optional[int] = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _UpperCamelCase : List[Any] = [(pred, label)] _UpperCamelCase, _UpperCamelCase : int = [], [] for question, preds_labels in question_map.items(): _UpperCamelCase, _UpperCamelCase : Any = zip(*lowercase_ ) _UpperCamelCase : Union[str, Any] = fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average="macro" ) fas.append(lowercase_ ) _UpperCamelCase : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase_ ) ) ems.append(lowercase_ ) _UpperCamelCase : Tuple = float(sum(lowercase_ ) / len(lowercase_ ) ) _UpperCamelCase : Union[str, Any] = sum(lowercase_ ) / len(lowercase_ ) _UpperCamelCase : Any = float(fa_score(y_true=lowercase_ ,y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "prediction_text": datasets.Value("string" ), }, "references": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "answers": datasets.Sequence(datasets.Value("string" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64" ), "paragraph": datasets.Value("int64" ), "question": datasets.Value("int64" ), }, "prediction": datasets.Value("int64" ), }, "references": datasets.Value("int64" ), } else: return { "predictions": datasets.Value("int64" ), "references": datasets.Value("int64" ), } def 
__SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str] ) -> int: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(__a , __a )} elif self.config_name == "cb": return acc_and_fa(__a , __a , fa_avg="macro" ) elif self.config_name == "record": _UpperCamelCase : Optional[Any] = [ { "qas": [ {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]} for ref in references ] } ] _UpperCamelCase : str = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions} return evaluate_record(__a , __a )[0] elif self.config_name == "multirc": return evaluate_multirc(__a , __a ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__a , __a )} else: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
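An illustrative call to the `evaluate_multirc` helper defined above, with a single two-answer question; the prediction values are chosen for illustration.

# Illustrative call: one MultiRC question with two answers, both predicted correctly.
preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
]
labels = [1, 0]
print(evaluate_multirc(preds, labels))  # {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}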
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : Dict , **__a : List[str] ) -> Any: pass def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. lowerCamelCase__ = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, Any] , __a : Tuple , __a : Optional[Any] ) -> Tuple: _UpperCamelCase : Dict = pipeline( "document-question-answering" , model=__a , tokenizer=__a , image_processor=__a ) _UpperCamelCase : Dict = INVOICE_URL _UpperCamelCase : str = list(zip(*apply_tesseract(load_image(__a ) , __a , "" ) ) ) _UpperCamelCase : str = "What is the placebo?" _UpperCamelCase : str = [ { "image": load_image(__a ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Union[str, Any] , __a : Optional[int] ) -> str: _UpperCamelCase : Tuple = dqa_pipeline(__a , top_k=2 ) self.assertEqual( __a , [ [ {"score": ANY(__a ), "answer": ANY(__a ), "start": ANY(__a ), "end": ANY(__a )}, {"score": ANY(__a ), "answer": ANY(__a ), "start": ANY(__a ), "end": ANY(__a )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: _UpperCamelCase : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) _UpperCamelCase : Tuple = INVOICE_URL _UpperCamelCase : Optional[Any] = "How many cats are there?" _UpperCamelCase : Tuple = [ {"score": 0.00_01, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.00_01, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] _UpperCamelCase : Optional[int] = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual(nested_simplify(__a , decimals=4 ) , __a ) _UpperCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__a , decimals=4 ) , __a ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably _UpperCamelCase : List[str] = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCamelCase : Dict = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual(__a , [] ) # We can optionnally pass directly the words and bounding boxes _UpperCamelCase : Optional[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [] _UpperCamelCase : List[Any] = dqa_pipeline(image=__a , question=__a , words=__a , boxes=__a , top_k=2 ) self.assertEqual(__a , [] ) @slow @require_torch @require_detectrona @require_pytesseract def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: _UpperCamelCase : Optional[Any] = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) _UpperCamelCase : List[Any] = INVOICE_URL _UpperCamelCase : Dict = "What is the invoice number?" _UpperCamelCase : Dict = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] , ) _UpperCamelCase : Optional[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] , ) _UpperCamelCase : List[Any] = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: _UpperCamelCase : List[Any] = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) _UpperCamelCase : List[Any] = INVOICE_URL _UpperCamelCase : Optional[Any] = "What is the invoice number?" 
_UpperCamelCase : Union[str, Any] = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] , ) _UpperCamelCase : Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] , ) _UpperCamelCase : str = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__a ) _UpperCamelCase : str = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__a , revision="3dc6de3" , ) _UpperCamelCase : List[str] = INVOICE_URL _UpperCamelCase : Optional[Any] = "What is the invoice number?" _UpperCamelCase : Any = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) _UpperCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) _UpperCamelCase : int = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) _UpperCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(__a ) , __a , "" ) ) ) # This model should also work if `image` is set to None _UpperCamelCase : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: _UpperCamelCase : int = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__a ) _UpperCamelCase : Tuple = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__a , revision="3dc6de3" , max_seq_len=50 , ) _UpperCamelCase : Union[str, Any] = INVOICE_URL _UpperCamelCase : int = "What is the invoice number?" 
_UpperCamelCase : Union[str, Any] = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] , ) _UpperCamelCase : Any = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) _UpperCamelCase : Optional[Any] = list(zip(*apply_tesseract(load_image(__a ) , __a , "" ) ) ) # This model should also work if `image` is set to None _UpperCamelCase : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) _UpperCamelCase : int = INVOICE_URL _UpperCamelCase : Union[str, Any] = "What is the invoice number?" _UpperCamelCase : Optional[int] = dqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual(nested_simplify(__a , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: pass
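# A minimal usage sketch of what the tests above verify. The checkpoint and the
# answer format come from the slow tests in this file; the exact scores printed
# are illustrative, not guaranteed output.
if __name__ == "__main__":
    sketch_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    answers = sketch_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
    # Each answer is a dict with "score", "answer", "start" and "end" keys,
    # e.g. {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}.
    print(answers)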
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"} lowerCamelCase__ = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, "tokenizer_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json", }, } lowerCamelCase__ = { "google/rembert": 256, } lowerCamelCase__ = "▁" class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ :Union[str, Any] = RemBertTokenizer def __init__( self : str , __a : List[str]=None , __a : Optional[Any]=None , __a : Optional[Any]=True , __a : Any=True , __a : Optional[int]=False , __a : List[Any]="[CLS]" , __a : Dict="[SEP]" , __a : int="<unk>" , __a : List[str]="[SEP]" , __a : List[Any]="<pad>" , __a : Union[str, Any]="[CLS]" , __a : Optional[int]="[MASK]" , **__a : Optional[int] , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , ) _UpperCamelCase : List[Any] = do_lower_case _UpperCamelCase : Any = remove_space _UpperCamelCase : List[Any] = keep_accents _UpperCamelCase : Optional[Any] = vocab_file _UpperCamelCase : Any = False if not self.vocab_file else True def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: _UpperCamelCase : List[Any] = [self.sep_token_id] _UpperCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] def __SCREAMING_SNAKE_CASE ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__a ): logger.error("Vocabulary path ({}) should be a directory".format(__a ) ) return _UpperCamelCase : List[Any] = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) return (out_vocab_file,)
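# Illustration of the layouts produced by the three helpers above, following
# the standard BERT-style scheme:
#   single sequence: [CLS] A [SEP]           -> token_type_ids 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]   -> token_type_ids 0 ... 0 1 ... 1
# and the special-tokens mask marks exactly the [CLS]/[SEP] positions with 1.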
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True} SCREAMING_SNAKE_CASE__ :Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : str = {"unk_token": "<unk>"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase : Optional[Any] = "lower newer" _UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : str = tokens + [tokenizer.unk_token] _UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = "lower newer" # Testing tokenization _UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens _UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with 
special tokens _UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token _UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token] _UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input _UpperCamelCase : Optional[int] = "This is a simple input" _UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Dict = ("This is a simple input", "This is a pair") _UpperCamelCase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: _UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _UpperCamelCase : Union[str, Any] = "This is a simple input" _UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"] _UpperCamelCase : str = ("This is a simple input", "This is a pair") _UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id _UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" ) _UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) _UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" ) _UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: _UpperCamelCase : Any = "$$$" _UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) _UpperCamelCase : int = "This is a simple input" _UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id _UpperCamelCase : str = tokenizer(__a ) _UpperCamelCase : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids ) _UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Tuple = "Encode this." _UpperCamelCase : List[str] = "This one too please." 
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a ) encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a ) _UpperCamelCase : int = tokenizer.encode_plus( __a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , ) _UpperCamelCase : str = encoded_sequence_dict["input_ids"] _UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__a ) , len(__a ) ) _UpperCamelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__a ) ] _UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__a , __a ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Any = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) _UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" ) _UpperCamelCase : Optional[Any] = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Union[str, Any] = tokenizer.encode( __a , ) # Same as above self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[str] = "bos" _UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"] _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : List[Any] = tokenizer.encode( __a , ) # We changed the bos token self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _UpperCamelCase : Tuple = tokenizer.encode( __a , ) self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
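# A short sketch of the padding behaviour exercised in the tests above. The pad
# token is supplied explicitly because GPT-2 checkpoints ship without one; the
# model name and inputs here are illustrative.
if __name__ == "__main__":
    sketch_tok = GPTaTokenizerFast.from_pretrained("gpt2", pad_token="<pad>")
    batch = sketch_tok(["a short input", "a much much longer second input"], padding=True, return_tensors="np")
    # padding=True pads to the longest row in the batch: the shorter row ends in
    # pad token ids and its attention_mask contains zeros.
    print(batch["input_ids"].shape, batch["attention_mask"][0])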
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" with open(lowercase_ ) as metadata_file: _UpperCamelCase : Dict = json.load(lowercase_ ) _UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"] # Load the entity vocab file _UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ ) # add an entry for [MASK2] _UpperCamelCase : Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) _UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f: _UpperCamelCase : Tuple = json.load(lowercase_ ) _UpperCamelCase : Optional[int] = "MLukeTokenizer" with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) _UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ ) # Initialize the embeddings of the special tokens _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 ) _UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCamelCase : Optional[Any] = state_dict[bias_name] _UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.''' _UpperCamelCase : List[Any] = state_dict[prefix + matrix_name] _UpperCamelCase : str = state_dict[prefix + matrix_name] _UpperCamelCase : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCamelCase : Tuple = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCamelCase : int = state_dict["entity_predictions.bias"] _UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCamelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCamelCase : Union[str, Any] = state_dict[key] else: _UpperCamelCase : Dict = state_dict[key] _UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if set(lowercase_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowercase_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" ) _UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." _UpperCamelCase : Optional[Any] = (0, 9) _UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : List[str] = model(**lowercase_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 33, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 1, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = "Tokyo is the capital of <mask>." 
_UpperCamelCase : List[Any] = (24, 30) _UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : Optional[Any] = model(**lowercase_ ) _UpperCamelCase : int = encoding["input_ids"][0].tolist() _UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() _UpperCamelCase : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase_ ) ) model.save_pretrained(lowercase_ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"] _UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )] _UpperCamelCase : List[str] = {} for entry in data: _UpperCamelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCamelCase : Dict = entity_id break _UpperCamelCase : Dict = F'''{language}:{entity_name}''' _UpperCamelCase : str = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
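# Example invocation (the script name and paths are placeholders; the flags
# mirror the argparse definitions above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base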
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = load_tool("text-question-answering" ) self.tool.setup() _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { "configuration_time_series_transformer": [ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimeSeriesTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm ==
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and (size is None or resample is None): raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
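# A rough usage sketch for the video processor above (shapes and frame counts
# are illustrative): each input video is a list of frames, `make_batched` wraps
# single videos into a batch, and `preprocess` returns a BatchFeature whose
# "pixel_values" entry has shape
# (num_videos, num_frames, channels, crop_height, crop_width)
# after resizing, center-cropping, rescaling and normalization.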
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." 
) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) 
-> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." 
,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
310
1
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowercase__ ( lowercase_ ,lowercase_=1 ) -> Union[str, Any]: """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def lowercase__ ( lowercase_ ,lowercase_=0 ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : Union[str, Any] = [] for old_item in old_list: _UpperCamelCase : List[Any] = old_item.replace("in_layers.0" ,"norm1" ) _UpperCamelCase : Optional[Any] = new_item.replace("in_layers.2" ,"conv1" ) _UpperCamelCase : Any = new_item.replace("out_layers.0" ,"norm2" ) _UpperCamelCase : List[str] = new_item.replace("out_layers.3" ,"conv2" ) _UpperCamelCase : List[Any] = new_item.replace("emb_layers.1" ,"time_emb_proj" ) _UpperCamelCase : List[str] = new_item.replace("skip_connection" ,"conv_shortcut" ) _UpperCamelCase : int = shave_segments(lowercase_ ,n_shave_prefix_segments=lowercase_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowercase__ ( lowercase_ ,lowercase_=0 ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : Dict = [] for old_item in old_list: _UpperCamelCase : List[str] = old_item _UpperCamelCase : str = new_item.replace("norm.weight" ,"group_norm.weight" ) _UpperCamelCase : Optional[int] = new_item.replace("norm.bias" ,"group_norm.bias" ) _UpperCamelCase : Union[str, Any] = new_item.replace("proj_out.weight" ,"proj_attn.weight" ) _UpperCamelCase : str = new_item.replace("proj_out.bias" ,"proj_attn.bias" ) _UpperCamelCase : List[Any] = shave_segments(lowercase_ ,n_shave_prefix_segments=lowercase_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ) -> Dict: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _UpperCamelCase : Tuple = old_checkpoint[path] _UpperCamelCase : int = old_tensor.shape[0] // 3 _UpperCamelCase : Tuple = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _UpperCamelCase : List[str] = old_tensor.shape[0] // config["num_head_channels"] // 3 _UpperCamelCase : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = old_tensor.split(channels // num_heads ,dim=1 ) _UpperCamelCase : Any = query.reshape(lowercase_ ) _UpperCamelCase : Tuple = key.reshape(lowercase_ ) _UpperCamelCase : List[Any] = value.reshape(lowercase_ ) for path in paths: _UpperCamelCase : Dict = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _UpperCamelCase : Optional[int] = new_path.replace("middle_block.0" ,"mid_block.resnets.0" ) _UpperCamelCase : Optional[Any] = new_path.replace("middle_block.1" ,"mid_block.attentions.0" ) _UpperCamelCase : int = new_path.replace("middle_block.2" ,"mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: _UpperCamelCase : Tuple = new_path.replace(replacement["old"] ,replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _UpperCamelCase : List[str] = old_checkpoint[path["old"]][:, :, 0] else: _UpperCamelCase : Union[str, Any] = old_checkpoint[path["old"]] def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : List[str] = {} _UpperCamelCase : Optional[int] = checkpoint["time_embed.0.weight"] _UpperCamelCase : Dict = checkpoint["time_embed.0.bias"] _UpperCamelCase : Union[str, Any] = checkpoint["time_embed.2.weight"] _UpperCamelCase : List[Any] = checkpoint["time_embed.2.bias"] _UpperCamelCase : List[str] = checkpoint["input_blocks.0.0.weight"] _UpperCamelCase : int = checkpoint["input_blocks.0.0.bias"] _UpperCamelCase : Dict = checkpoint["out.0.weight"] _UpperCamelCase : List[Any] = checkpoint["out.0.bias"] _UpperCamelCase : str = checkpoint["out.2.weight"] _UpperCamelCase : Dict = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only _UpperCamelCase : str = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} ) _UpperCamelCase : str = { layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(lowercase_ ) } # Retrieves the keys for the middle blocks only _UpperCamelCase : Optional[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) _UpperCamelCase : List[Any] = { layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(lowercase_ ) } # Retrieves the keys for the output blocks only _UpperCamelCase : Union[str, Any] = len({".".join(layer.split("." 
)[:2] ) for layer in checkpoint if "output_blocks" in layer} ) _UpperCamelCase : Tuple = { layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(lowercase_ ) } for i in range(1 ,lowercase_ ): _UpperCamelCase : Union[str, Any] = (i - 1) // (config["num_res_blocks"] + 1) _UpperCamelCase : Any = (i - 1) % (config["num_res_blocks"] + 1) _UpperCamelCase : Optional[int] = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _UpperCamelCase : Union[str, Any] = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _UpperCamelCase : Any = checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _UpperCamelCase : Dict = checkpoint[ F'''input_blocks.{i}.0.op.bias''' ] continue _UpperCamelCase : Optional[Any] = renew_resnet_paths(lowercase_ ) _UpperCamelCase : Any = {"old": F'''input_blocks.{i}.0''', "new": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _UpperCamelCase : List[Any] = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( lowercase_ ,lowercase_ ,lowercase_ ,additional_replacements=[meta_path, resnet_op] ,config=lowercase_ ) if len(lowercase_ ): _UpperCamelCase : Tuple = renew_attention_paths(lowercase_ ) _UpperCamelCase : Union[str, Any] = { "old": F'''input_blocks.{i}.1''', "new": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _UpperCamelCase : Dict = { F'''input_blocks.{i}.1.qkv.bias''': { "key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { "key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( lowercase_ ,lowercase_ ,lowercase_ ,additional_replacements=[meta_path] ,attention_paths_to_split=lowercase_ ,config=lowercase_ ,) _UpperCamelCase : Union[str, Any] = middle_blocks[0] _UpperCamelCase : str = middle_blocks[1] _UpperCamelCase : Optional[Any] = middle_blocks[2] _UpperCamelCase : List[str] = renew_resnet_paths(lowercase_ ) assign_to_checkpoint(lowercase_ ,lowercase_ ,lowercase_ ,config=lowercase_ ) _UpperCamelCase : Union[str, Any] = renew_resnet_paths(lowercase_ ) assign_to_checkpoint(lowercase_ ,lowercase_ ,lowercase_ ,config=lowercase_ ) _UpperCamelCase : List[Any] = renew_attention_paths(lowercase_ ) _UpperCamelCase : str = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( lowercase_ ,lowercase_ ,lowercase_ ,attention_paths_to_split=lowercase_ ,config=lowercase_ ) for i in range(lowercase_ ): _UpperCamelCase : str = i // (config["num_res_blocks"] + 1) _UpperCamelCase : int = i % (config["num_res_blocks"] + 1) _UpperCamelCase : str = [shave_segments(lowercase_ ,2 ) for name in output_blocks[i]] _UpperCamelCase : Optional[Any] = {} for layer in output_block_layers: _UpperCamelCase, _UpperCamelCase : Tuple = layer.split("." 
)[0], shave_segments(lowercase_ ,1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowercase_ ) else: _UpperCamelCase : int = [layer_name] if len(lowercase_ ) > 1: _UpperCamelCase : Dict = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _UpperCamelCase : Tuple = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _UpperCamelCase : Any = renew_resnet_paths(lowercase_ ) _UpperCamelCase : int = renew_resnet_paths(lowercase_ ) _UpperCamelCase : Optional[int] = {"old": F'''output_blocks.{i}.0''', "new": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(lowercase_ ,lowercase_ ,lowercase_ ,additional_replacements=[meta_path] ,config=lowercase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _UpperCamelCase : Union[str, Any] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) _UpperCamelCase : Dict = checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _UpperCamelCase : List[str] = checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(lowercase_ ) == 2: _UpperCamelCase : List[str] = [] if len(lowercase_ ): _UpperCamelCase : int = renew_attention_paths(lowercase_ ) _UpperCamelCase : Any = { "old": F'''output_blocks.{i}.1''', "new": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _UpperCamelCase : int = { F'''output_blocks.{i}.1.qkv.bias''': { "key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { "key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( lowercase_ ,lowercase_ ,lowercase_ ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None ,config=lowercase_ ,) else: _UpperCamelCase : List[Any] = renew_resnet_paths(lowercase_ ,n_shave_prefix_segments=1 ) for path in resnet_0_paths: _UpperCamelCase : Optional[int] = ".".join(["output_blocks", str(lowercase_ ), path["old"]] ) _UpperCamelCase : List[Any] = ".".join(["up_blocks", str(lowercase_ ), "resnets", str(lowercase_ ), path["new"]] ) _UpperCamelCase : int = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCamelCase__ = json.loads(f.read()) lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCamelCase__ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: lowerCamelCase__ = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) lowerCamelCase__ = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
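The conversion script above is driven entirely by argparse; a typical invocation looks like this (the script filename and the paths are placeholders, the three flags are verbatim from the parser):

python convert_ldm_checkpoint.py \
    --checkpoint_path ./ldm/model.ckpt \
    --config_file ./ldm/config.json \
    --dump_path ./converted-ldm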
310
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
310
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def lowercase__ ( lowercase_ ) -> list[list[float]]: """simple docstring""" _UpperCamelCase : int = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowercase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix _UpperCamelCase : Tuple = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("This matrix has no inverse." ) # Creates a copy of the matrix with swapped positions of the elements _UpperCamelCase : Union[str, Any] = [[0.0, 0.0], [0.0, 0.0]] _UpperCamelCase, _UpperCamelCase : Optional[int] = matrix[1][1], matrix[0][0] _UpperCamelCase, _UpperCamelCase : Any = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowercase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowercase_ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule _UpperCamelCase : Optional[int] = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("This matrix has no inverse." ) # Creating cofactor matrix _UpperCamelCase : Union[str, Any] = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] _UpperCamelCase : str = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) _UpperCamelCase : Any = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) _UpperCamelCase : Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) _UpperCamelCase : int = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) _UpperCamelCase : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) _UpperCamelCase : str = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) _UpperCamelCase : Dict = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) _UpperCamelCase : Optional[int] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) _UpperCamelCase : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) _UpperCamelCase : str = array(lowercase_ ) for i in range(3 ): for j in range(3 ): _UpperCamelCase : Union[str, Any] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix _UpperCamelCase : Tuple = array(lowercase_ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowercase_ ) # Calculate the inverse of the matrix return [[float(d(lowercase_ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
310
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
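A sketch of applying one of the biquad builders above. The make_lowpass name follows the obvious intent of the first (obfuscated) definition, and the sketch assumes IIRFilter from audio_filters.iir_filter exposes a per-sample process() method, as in the same project; treat both as assumptions.

# Hedged sketch: design a 1 kHz low-pass biquad at a 48 kHz sample rate.
filt = make_lowpass(1000, 48000)               # name reconstructed from the definition above
filtered = [filt.process(s) for s in samples]  # `samples`: any iterable of floats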
310
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "rag" SCREAMING_SNAKE_CASE__ :List[str] = True def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any: super().__init__( bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" ) _UpperCamelCase : str = question_encoder_config.pop("model_type" ) _UpperCamelCase : Tuple = kwargs.pop("generator" ) _UpperCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : str = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : Optional[int] = reduce_loss _UpperCamelCase : str = label_smoothing _UpperCamelCase : int = exclude_bos_score _UpperCamelCase : List[str] = do_marginalize _UpperCamelCase : Optional[int] = title_sep _UpperCamelCase : Optional[int] = doc_sep _UpperCamelCase : Union[str, Any] = n_docs _UpperCamelCase : Tuple = max_combined_length _UpperCamelCase : Union[str, Any] = dataset _UpperCamelCase : Any = dataset_split _UpperCamelCase : List[str] = index_name _UpperCamelCase : int = retrieval_vector_size _UpperCamelCase : str = retrieval_batch_size _UpperCamelCase : Dict = passages_path _UpperCamelCase : str = index_path _UpperCamelCase : Tuple = use_dummy_dataset _UpperCamelCase : Union[str, Any] = output_retrieved _UpperCamelCase : Optional[Any] = do_deduplication _UpperCamelCase : str = use_cache if self.forced_eos_token_id is None: _UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: _UpperCamelCase : Dict = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = self.question_encoder.to_dict() _UpperCamelCase : Tuple = self.generator.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
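The class above is the standard composite RAG configuration from transformers; a DPR question encoder paired with a BART generator is the usual instantiation, via the real from_question_encoder_generator_configs classmethod (obfuscated in the dump above):

from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)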
310
1
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , *__a : Dict , __a : str=None , __a : Dict=None , **__a : int ) -> int: super().__init__(*__a , **__a ) _UpperCamelCase : Union[str, Any] = eval_examples _UpperCamelCase : Optional[int] = post_process_function def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Union[str, Any]=None , __a : Union[str, Any]=None , __a : str=None , __a : str = "eval" ) -> Dict: _UpperCamelCase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset _UpperCamelCase : List[Any] = self.get_eval_dataloader(__a ) _UpperCamelCase : Optional[int] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _UpperCamelCase : Union[str, Any] = self.compute_metrics _UpperCamelCase : List[str] = None _UpperCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCamelCase : List[Any] = time.time() try: _UpperCamelCase : str = eval_loop( __a , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , ) finally: _UpperCamelCase : List[Any] = compute_metrics _UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( __a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _UpperCamelCase : Any = self.post_process_function(__a , __a , output.predictions ) _UpperCamelCase : Optional[Any] = self.compute_metrics(__a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _UpperCamelCase : Any = metrics.pop(__a ) metrics.update(output.metrics ) else: _UpperCamelCase : Optional[int] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__a ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _UpperCamelCase : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , __a ) return metrics def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : Optional[int] , __a : Optional[Any]=None , __a : str = "test" ) -> str: _UpperCamelCase : int = self.get_test_dataloader(__a ) # Temporarily disable metric computation, we will do it in the loop here. 
_UpperCamelCase : List[Any] = self.compute_metrics _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCamelCase : List[Any] = time.time() try: _UpperCamelCase : Dict = eval_loop( __a , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , ) finally: _UpperCamelCase : Any = compute_metrics _UpperCamelCase : Optional[Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( __a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _UpperCamelCase : Tuple = self.post_process_function(__a , __a , output.predictions , "predict" ) _UpperCamelCase : List[Any] = self.compute_metrics(__a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _UpperCamelCase : Tuple = metrics.pop(__a ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__a )
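A sketch of wiring up the trainer subclass above. Its original class name was stripped in this dump; QuestionAnsweringTrainer is assumed here, as are the surrounding model/args/dataset objects.

trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_features,
    eval_dataset=eval_features,   # tokenized features
    eval_examples=eval_examples,  # raw examples, consumed by the post-processing hook
    post_process_function=post_processing_function,  # turns logits into answer spans
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()      # eval loop plus span post-processing, as defined above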
310
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int: _UpperCamelCase : Tuple = parent _UpperCamelCase : str = batch_size _UpperCamelCase : Tuple = image_size _UpperCamelCase : List[str] = patch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : List[str] = is_training _UpperCamelCase : Any = use_labels _UpperCamelCase : int = hidden_size _UpperCamelCase : List[Any] = num_hidden_layers _UpperCamelCase : Union[str, Any] = num_attention_heads _UpperCamelCase : Optional[int] = intermediate_size _UpperCamelCase : Any = hidden_act _UpperCamelCase : Dict = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = type_sequence_label_size _UpperCamelCase : int = initializer_range _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Any = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2 _UpperCamelCase : Optional[int] = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Union[str, Any] = None if self.use_labels: _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase : Any = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = ViTModel(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]: _UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCamelCase : Union[str, Any] = 1 _UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : Dict = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int: _UpperCamelCase : Any = self.type_sequence_label_size _UpperCamelCase : Optional[Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : List[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Dict = self.prepare_config_and_inputs() ( ( _UpperCamelCase ), ( _UpperCamelCase ), ( _UpperCamelCase ), ) : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ :Any = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ :str = True SCREAMING_SNAKE_CASE__ :List[Any] = False SCREAMING_SNAKE_CASE__ :int = False SCREAMING_SNAKE_CASE__ :int = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : Dict = ViTModelTester(self ) _UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: pass def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _UpperCamelCase : Any = model_class(__a ) _UpperCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : List[str] = [*signature.parameters.keys()] _UpperCamelCase : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : List[str] = ViTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: _UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a ) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[Any] = prepare_img() _UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : Dict = model(**__a ) # verify the logits _UpperCamelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) _UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a ) _UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a ) # verify the logits _UpperCamelCase : int = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , __a ) _UpperCamelCase : int = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: _UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) _UpperCamelCase : int = self.default_image_processor _UpperCamelCase : Dict = prepare_img() _UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCamelCase : int = model(__a )
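The resolution-interpolation test above hinges on a single forward-pass flag. A minimal standalone sketch of that path, assuming hub access and the same DINO checkpoint; the image path is the COCO fixture used by these tests, but any RGB image works:

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel

model = ViTModel.from_pretrained("facebook/dino-vits8")
processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    # interpolate_pos_encoding=True resizes the pre-trained position
    # embeddings so the 480x480 input yields the longer 3601-token patch
    # sequence checked in the test above.
    outputs = model(inputs.pixel_values, interpolate_pos_encoding=True)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 3601, 384])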
310
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[int] = -1 _UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Any = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Optional[int] = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Tuple = TextIteratorStreamer(__a ) _UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Union[str, Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Tuple = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" ) _UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _UpperCamelCase : int = -1 _UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : int = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _UpperCamelCase : List[str] = "" for new_text in streamer: streamer_text += new_text
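The two streamer classes exercised above differ in who drives the loop: TextStreamer prints from inside generate(), while TextIteratorStreamer hands decoded chunks to a consumer thread. A minimal sketch of the iterator pattern, assuming the same tiny test checkpoint:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)

# generate() blocks until decoding finishes, so it runs on a worker thread
# while the main thread consumes text chunks as they become available.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="")
thread.join()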
310
1
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ) -> str: """simple docstring""" raise RuntimeError("CUDA out of memory." ) class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ) -> List[str]: super().__init__() _UpperCamelCase : List[str] = nn.Linear(3 , 4 ) _UpperCamelCase : int = nn.BatchNormad(4 ) _UpperCamelCase : Dict = nn.Linear(4 , 5 ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: _UpperCamelCase : List[str] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__a : List[str] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [128, 64, 32, 16, 8] ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: _UpperCamelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__a : List[Any] , __a : Optional[Any] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga _UpperCamelCase, _UpperCamelCase : Tuple = mock_training_loop_function("hello" ) self.assertListEqual(__a , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, "hello"] ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__a : Tuple ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__a : Tuple ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__a : Optional[Any] , __a : Optional[int] , __a : Union[str, Any] ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(128 , "hello" , "world" ) self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] ) self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__a : int ): raise ValueError("Oops, we had an error!" ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("Oops, we had an error!" , cm.exception.args[0] ) @require_cuda def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: _UpperCamelCase : List[str] = torch.cuda.memory_allocated() _UpperCamelCase : Dict = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) _UpperCamelCase : Union[str, Any] = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
310
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" with open(lowercase_ ) as metadata_file: _UpperCamelCase : Dict = json.load(lowercase_ ) _UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"] # Load the entity vocab file _UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ ) # add an entry for [MASK2] _UpperCamelCase : Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) _UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f: _UpperCamelCase : Tuple = json.load(lowercase_ ) _UpperCamelCase : Optional[int] = "MLukeTokenizer" with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) _UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ ) # Initialize the embeddings of the special tokens _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 ) _UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCamelCase : Optional[Any] = state_dict[bias_name] _UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.''' _UpperCamelCase : List[Any] = state_dict[prefix + matrix_name] _UpperCamelCase : str = state_dict[prefix + matrix_name] _UpperCamelCase : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCamelCase : Tuple = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCamelCase : int = state_dict["entity_predictions.bias"] _UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCamelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCamelCase : Union[str, Any] = state_dict[key] else: _UpperCamelCase : Dict = state_dict[key] _UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if set(lowercase_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowercase_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" ) _UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." _UpperCamelCase : Optional[Any] = (0, 9) _UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : List[str] = model(**lowercase_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 33, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 1, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = "Tokyo is the capital of <mask>." 
_UpperCamelCase : List[Any] = (24, 30) _UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : Optional[Any] = model(**lowercase_ ) _UpperCamelCase : int = encoding["input_ids"][0].tolist() _UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() _UpperCamelCase : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase_ ) ) model.save_pretrained(lowercase_ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"] _UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )] _UpperCamelCase : List[str] = {} for entry in data: _UpperCamelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCamelCase : Dict = entity_id break _UpperCamelCase : Dict = F'''{language}:{entity_name}''' _UpperCamelCase : str = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
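Most of the surgery in this conversion script is the same move repeated: take an existing embedding row, unsqueeze it, and concatenate it onto the matrix so a newly added token starts from a sensible initialization. The move in isolation, with toy shapes:

import torch

word_emb = torch.randn(10, 4)        # toy vocabulary of 10 tokens, dim 4
ent_row = word_emb[3].unsqueeze(0)   # reuse the row of an existing token
enta_row = word_emb[5].unsqueeze(0)
resized = torch.cat([word_emb, ent_row, enta_row])
print(resized.shape)                 # torch.Size([12, 4])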
310
1
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
310
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a ) }
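The docstring above defines sentence-level GLEU precisely enough to restate in a few lines: collect all n-grams of order 1 through 4 from hypothesis and reference, count clipped matches, and take the minimum of precision and recall. A self-contained sketch of that definition (the metric itself delegates the corpus-level aggregation to nltk's gleu_score, which sums counts over sentences before dividing):

from collections import Counter

def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
    def ngrams(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )
    ref, hyp = ngrams(reference), ngrams(hypothesis)
    matches = sum((ref & hyp).values())  # Counter & keeps the min count per n-gram
    recall = matches / sum(ref.values())
    precision = matches / sum(hyp.values())
    return min(recall, precision)

print(sentence_gleu("the cat sat".split(), "the cat sat".split()))  # 1.0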
310
1
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset lowerCamelCase__ = random.Random() def lowercase__ ( lowercase_ ,lowercase_=1.0 ,lowercase_=None ,lowercase_=None ) -> str: """simple docstring""" if rng is None: _UpperCamelCase : List[str] = global_rng _UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] , __a : Optional[int] , __a : Union[str, Any]=7 , __a : Any=400 , __a : List[str]=2000 , __a : str=2048 , __a : int=128 , __a : Tuple=1 , __a : Any=512 , __a : str=30 , __a : int=4_4100 , ) -> Dict: _UpperCamelCase : List[str] = parent _UpperCamelCase : Union[str, Any] = batch_size _UpperCamelCase : Tuple = min_seq_length _UpperCamelCase : Optional[Any] = max_seq_length _UpperCamelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCamelCase : int = spectrogram_length _UpperCamelCase : str = feature_size _UpperCamelCase : str = num_audio_channels _UpperCamelCase : Union[str, Any] = hop_length _UpperCamelCase : Union[str, Any] = chunk_length _UpperCamelCase : int = sampling_rate def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any]=False , __a : Tuple=False ) -> Dict: def _flatten(__a : Optional[Any] ): return list(itertools.chain(*__a ) ) if equal_length: _UpperCamelCase : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCamelCase : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCamelCase : List[Any] = [np.asarray(__a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = TvltFeatureExtractor def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict: _UpperCamelCase : int = TvltFeatureExtractionTester(self ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: _UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(__a , "spectrogram_length" ) ) self.assertTrue(hasattr(__a , "feature_size" ) ) self.assertTrue(hasattr(__a , "num_audio_channels" ) ) self.assertTrue(hasattr(__a , "hop_length" ) ) self.assertTrue(hasattr(__a , "chunk_length" ) ) self.assertTrue(hasattr(__a , "sampling_rate" ) ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: _UpperCamelCase : Optional[int] = 
self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase : int = feat_extract_first.save_pretrained(__a )[0] check_json_file_has_correct_format(__a ) _UpperCamelCase : List[str] = self.feature_extraction_class.from_pretrained(__a ) _UpperCamelCase : Dict = feat_extract_first.to_dict() _UpperCamelCase : Optional[int] = feat_extract_second.to_dict() _UpperCamelCase : Dict = dict_first.pop("mel_filters" ) _UpperCamelCase : Optional[Any] = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(__a , __a ) ) self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: _UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase : str = os.path.join(__a , "feat_extract.json" ) feat_extract_first.to_json_file(__a ) _UpperCamelCase : Dict = self.feature_extraction_class.from_json_file(__a ) _UpperCamelCase : Optional[Any] = feat_extract_first.to_dict() _UpperCamelCase : List[Any] = feat_extract_second.to_dict() _UpperCamelCase : List[str] = dict_first.pop("mel_filters" ) _UpperCamelCase : Optional[Any] = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(__a , __a ) ) self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: # Initialize feature_extractor _UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _UpperCamelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCamelCase : int = [np.asarray(__a ) for speech_input in speech_inputs] # Test not batched input _UpperCamelCase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _UpperCamelCase : str = feature_extractor(__a , return_tensors="np" , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _UpperCamelCase : Dict = feature_extractor( __a , return_tensors="np" , sampling_rate=4_4100 , mask_audio=__a ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_UpperCamelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCamelCase : Tuple = np.asarray(__a ) _UpperCamelCase : List[str] = feature_extractor(__a , return_tensors="np" , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Dict ) -> Dict: _UpperCamelCase : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCamelCase : List[Any] = ds.sort("id" ).select(range(__a ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: _UpperCamelCase : List[Any] = self._load_datasamples(1 ) _UpperCamelCase : Optional[Any] = TvltFeatureExtractor() _UpperCamelCase : int = feature_extractor(__a , return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _UpperCamelCase : Optional[Any] = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __a , atol=1e-4 ) )
310
"""simple docstring""" from __future__ import annotations from math import pi def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100""" lowerCamelCase__ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowerCamelCase__ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowerCamelCase__ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
310
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if "://" in dataset_path: _UpperCamelCase : List[Any] = dataset_path.split("://" )[1] return dataset_path def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ ) def lowercase__ ( ) -> None: """simple docstring""" if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase : Dict = None _UpperCamelCase : str = None _UpperCamelCase : str = threading.Lock()
310
1
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCamelCase__ = 3 def lowercase__ ( lowercase_ ) -> int: """simple docstring""" print("Generating primitive root of p" ) while True: _UpperCamelCase : List[str] = random.randrange(3 ,lowercase_ ) if pow(lowercase_ ,2 ,lowercase_ ) == 1: continue if pow(lowercase_ ,lowercase_ ,lowercase_ ) == 1: continue return g def lowercase__ ( lowercase_ ) -> tuple[tuple[int, int, int, int], tuple[int, int]]: """simple docstring""" print("Generating prime p..." ) _UpperCamelCase : Union[str, Any] = rabin_miller.generate_large_prime(lowercase_ ) # select large prime number. _UpperCamelCase : List[Any] = primitive_root(lowercase_ ) # one primitive root on modulo p. _UpperCamelCase : Optional[int] = random.randrange(3 ,lowercase_ ) # private_key -> have to be greater than 2 for safety. _UpperCamelCase : int = cryptomath.find_mod_inverse(pow(lowercase_ ,lowercase_ ,lowercase_ ) ,lowercase_ ) _UpperCamelCase : List[Any] = (key_size, e_a, e_a, p) _UpperCamelCase : List[Any] = (key_size, d) return public_key, private_key def lowercase__ ( lowercase_ ,lowercase_ ) -> None: """simple docstring""" if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ): print("\nWARNING:" ) print( F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n''' "Use a different name or delete these files and re-run this program." ) sys.exit() _UpperCamelCase, _UpperCamelCase : int = generate_key(lowercase_ ) print(F'''\nWriting public key to file {name}_pubkey.txt...''' ) with open(F'''{name}_pubkey.txt''' ,"w" ) as fo: fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' ) print(F'''Writing private key to file {name}_privkey.txt...''' ) with open(F'''{name}_privkey.txt''' ,"w" ) as fo: fo.write(F'''{private_key[0]},{private_key[1]}''' ) def lowercase__ ( ) -> None: """simple docstring""" print("Making key files..." ) make_key_files("elgamal" ,2_048 ) print("Key files generation successful" ) if __name__ == "__main__": main()
310
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
310
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
310
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100""" lowerCamelCase__ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowerCamelCase__ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowerCamelCase__ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
310
1
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = RobertaConfig SCREAMING_SNAKE_CASE__ :Any = "roberta" def __init__( self : int , __a : List[Any] ) -> List[str]: super().__init__(__a ) _UpperCamelCase : Optional[Any] = RobertaEmbeddings(__a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = RobertaConfig SCREAMING_SNAKE_CASE__ :Union[str, Any] = "roberta" def __init__( self : Tuple , __a : Optional[int] ) -> Optional[int]: super().__init__(__a ) _UpperCamelCase : Tuple = config.num_labels _UpperCamelCase : Dict = config.num_hidden_layers _UpperCamelCase : Optional[Any] = DeeRobertaModel(__a ) _UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob ) _UpperCamelCase : Tuple = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int=None , __a : List[str]=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=-1 , __a : Union[str, Any]=False , ) -> str: _UpperCamelCase : Tuple = self.num_layers try: _UpperCamelCase : Union[str, Any] = self.roberta( __a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , ) _UpperCamelCase : int = outputs[1] _UpperCamelCase : Any = self.dropout(__a ) _UpperCamelCase : Optional[int] = self.classifier(__a ) _UpperCamelCase : str = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _UpperCamelCase : str = e.message _UpperCamelCase : Tuple = e.exit_layer _UpperCamelCase : Optional[int] = outputs[0] if not self.training: _UpperCamelCase : List[Any] = entropy(__a ) _UpperCamelCase : List[Any] = [] _UpperCamelCase : Union[str, Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression _UpperCamelCase : List[Any] = MSELoss() _UpperCamelCase : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCamelCase : Tuple = CrossEntropyLoss() _UpperCamelCase : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _UpperCamelCase : Any = [] for highway_exit in outputs[-1]: _UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(__a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _UpperCamelCase : Optional[Any] = MSELoss() _UpperCamelCase : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCamelCase : Optional[Any] = CrossEntropyLoss() _UpperCamelCase : Optional[Any] = 
loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__a ) if train_highway: _UpperCamelCase : Optional[int] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _UpperCamelCase : Union[str, Any] = (loss,) + outputs if not self.training: _UpperCamelCase : int = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
310
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
1
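The DeeRoBERTa rows above gate early exit on `entropy(logits)`, imported from `modeling_highway_bert` but not shown here. A plausible sketch of that criterion, assuming the usual Shannon entropy over the softmax distribution (the exact reduction in the source may differ):

import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    # Softmax over labels, then per-example Shannon entropy.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

# Confident logits yield low entropy; flat logits yield high entropy,
# so an exit fires when the entropy falls below a chosen threshold.
print(logits_entropy(torch.tensor([[10.0, 0.0], [0.5, 0.5]])))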
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE__ :Any = "Pix2StructImageProcessor" SCREAMING_SNAKE_CASE__ :Optional[Any] = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : int , __a : List[Any] , __a : Any ) -> int: _UpperCamelCase : List[Any] = False super().__init__(__a , __a ) def __call__( self : List[str] , __a : Tuple=None , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : Optional[int] = 2048 , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Any , ) -> BatchEncoding: if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None and not self.image_processor.is_vqa: _UpperCamelCase : Optional[Any] = self.tokenizer _UpperCamelCase : Tuple = self.tokenizer( text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values _UpperCamelCase : Optional[Any] = self.image_processor( __a , return_tensors=__a , max_patches=__a , **__a ) else: # add pixel_values and bbox _UpperCamelCase : int = self.image_processor( __a , return_tensors=__a , max_patches=__a , header_text=__a , **__a ) if text is not None and not self.image_processor.is_vqa: _UpperCamelCase : Dict = self.tokenizer( text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) if "attention_mask" in text_encoding: _UpperCamelCase : Union[str, Any] = text_encoding.pop("attention_mask" ) if "input_ids" in text_encoding: _UpperCamelCase : Optional[Any] = text_encoding.pop("input_ids" ) else: _UpperCamelCase : Optional[int] = None if text_encoding is not None: encoding_image_processor.update(__a ) return encoding_image_processor def __SCREAMING_SNAKE_CASE ( self : List[str] , *__a : Optional[Any] , **__a : Optional[int] ) -> Dict: return self.tokenizer.batch_decode(*__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : str , *__a : Dict , **__a : Optional[int] ) -> int: return self.tokenizer.decode(*__a , **__a ) @property def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: _UpperCamelCase : Union[str, Any] = self.tokenizer.model_input_names _UpperCamelCase : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + 
image_processor_input_names ) )
310
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
310
1
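The tests in the row above exercise `pipeline("object-detection", ...)` end to end; a minimal usage sketch of the same public API (weights are downloaded on first use, so network access is assumed):

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")

# Accepts a local path, PIL image, or URL; `threshold` drops low-confidence boxes.
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
for obj in results:
    # Each entry: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(obj["label"], obj["score"], obj["box"])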
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :torch.FloatTensor class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , __a : Optional[Any]=3 , __a : int=3 , __a : str=("DownEncoderBlock2D",) , __a : Tuple=(64,) , __a : List[str]=2 , __a : Any=32 , __a : Tuple="silu" , __a : Any=True , ) -> Optional[int]: super().__init__() _UpperCamelCase : Tuple = layers_per_block _UpperCamelCase : List[Any] = torch.nn.Convad( __a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) _UpperCamelCase : int = None _UpperCamelCase : str = nn.ModuleList([] ) # down _UpperCamelCase : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(__a ): _UpperCamelCase : int = output_channel _UpperCamelCase : int = block_out_channels[i] _UpperCamelCase : Dict = i == len(__a ) - 1 _UpperCamelCase : Union[str, Any] = get_down_block( __a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , ) self.down_blocks.append(__a ) # mid _UpperCamelCase : Optional[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , ) # out _UpperCamelCase : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 ) _UpperCamelCase : Any = nn.SiLU() _UpperCamelCase : str = 2 * out_channels if double_z else out_channels _UpperCamelCase : Optional[Any] = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 ) _UpperCamelCase : Dict = False def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] ) -> int: _UpperCamelCase : str = x _UpperCamelCase : Any = self.conv_in(__a ) if self.training and self.gradient_checkpointing: def create_custom_forward(__a : Optional[Any] ): def custom_forward(*__a : Union[str, Any] ): return module(*__a ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: _UpperCamelCase : str = torch.utils.checkpoint.checkpoint( create_custom_forward(__a ) , __a , use_reentrant=__a ) # middle _UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __a , use_reentrant=__a ) else: for down_block in self.down_blocks: _UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a ) # middle _UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a ) else: # down for down_block in self.down_blocks: _UpperCamelCase : int = down_block(__a ) # middle _UpperCamelCase : List[Any] = self.mid_block(__a ) # post-process _UpperCamelCase : Any = self.conv_norm_out(__a ) _UpperCamelCase : Union[str, Any] = self.conv_act(__a ) _UpperCamelCase : Dict = self.conv_out(__a ) return sample class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , __a : Any=3 , __a : 
Optional[Any]=3 , __a : int=("UpDecoderBlock2D",) , __a : Union[str, Any]=(64,) , __a : Union[str, Any]=2 , __a : int=32 , __a : Dict="silu" , __a : List[Any]="group" , ) -> Any: super().__init__() _UpperCamelCase : str = layers_per_block _UpperCamelCase : int = nn.Convad( __a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) _UpperCamelCase : Dict = None _UpperCamelCase : Union[str, Any] = nn.ModuleList([] ) _UpperCamelCase : List[str] = in_channels if norm_type == "spatial" else None # mid _UpperCamelCase : Any = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , ) # up _UpperCamelCase : Dict = list(reversed(__a ) ) _UpperCamelCase : Tuple = reversed_block_out_channels[0] for i, up_block_type in enumerate(__a ): _UpperCamelCase : Dict = output_channel _UpperCamelCase : Optional[Any] = reversed_block_out_channels[i] _UpperCamelCase : Optional[Any] = i == len(__a ) - 1 _UpperCamelCase : Union[str, Any] = get_up_block( __a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , ) self.up_blocks.append(__a ) _UpperCamelCase : str = output_channel # out if norm_type == "spatial": _UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a ) else: _UpperCamelCase : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 ) _UpperCamelCase : Union[str, Any] = nn.SiLU() _UpperCamelCase : Tuple = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 ) _UpperCamelCase : Any = False def __SCREAMING_SNAKE_CASE ( self : int , __a : Optional[Any] , __a : Any=None ) -> Any: _UpperCamelCase : str = z _UpperCamelCase : int = self.conv_in(__a ) _UpperCamelCase : str = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__a : int ): def custom_forward(*__a : Dict ): return module(*__a ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle _UpperCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a ) _UpperCamelCase : List[str] = sample.to(__a ) # up for up_block in self.up_blocks: _UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(__a ) , __a , __a , use_reentrant=__a ) else: # middle _UpperCamelCase : Optional[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __a , __a ) _UpperCamelCase : Optional[Any] = sample.to(__a ) # up for up_block in self.up_blocks: _UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a ) else: # middle _UpperCamelCase : Any = self.mid_block(__a , __a ) _UpperCamelCase : Any = sample.to(__a ) # up for up_block in self.up_blocks: _UpperCamelCase : List[str] = up_block(__a , __a ) # post-process if latent_embeds is None: _UpperCamelCase : int = self.conv_norm_out(__a ) else: _UpperCamelCase : str = self.conv_norm_out(__a , __a ) _UpperCamelCase : Dict = self.conv_act(__a ) _UpperCamelCase : List[str] = self.conv_out(__a ) return sample class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def 
__init__( self : Dict , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : List[str]=None , __a : List[str]="random" , __a : List[Any]=False , __a : Any=True ) -> List[str]: super().__init__() _UpperCamelCase : List[Any] = n_e _UpperCamelCase : Tuple = vq_embed_dim _UpperCamelCase : List[str] = beta _UpperCamelCase : Dict = legacy _UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) _UpperCamelCase : Any = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) _UpperCamelCase : int = self.used.shape[0] _UpperCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": _UpperCamelCase : int = self.re_embed _UpperCamelCase : Union[str, Any] = self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' F'''Using {self.unknown_index} for unknown indices.''' ) else: _UpperCamelCase : Tuple = n_e _UpperCamelCase : Optional[int] = sane_index_shape def __SCREAMING_SNAKE_CASE ( self : int , __a : str ) -> str: _UpperCamelCase : Union[str, Any] = inds.shape assert len(__a ) > 1 _UpperCamelCase : Optional[Any] = inds.reshape(ishape[0] , -1 ) _UpperCamelCase : List[str] = self.used.to(__a ) _UpperCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() _UpperCamelCase : int = match.argmax(-1 ) _UpperCamelCase : List[str] = match.sum(2 ) < 1 if self.unknown_index == "random": _UpperCamelCase : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: _UpperCamelCase : Dict = self.unknown_index return new.reshape(__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : List[Any] ) -> Optional[Any]: _UpperCamelCase : Union[str, Any] = inds.shape assert len(__a ) > 1 _UpperCamelCase : str = inds.reshape(ishape[0] , -1 ) _UpperCamelCase : Union[str, Any] = self.used.to(__a ) if self.re_embed > self.used.shape[0]: # extra token _UpperCamelCase : int = 0 # simply set to zero _UpperCamelCase : List[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a ) return back.reshape(__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : int ) -> List[str]: # reshape z -> (batch, height, width, channel) and flatten _UpperCamelCase : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous() _UpperCamelCase : Dict = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z _UpperCamelCase : Dict = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 ) _UpperCamelCase : Union[str, Any] = self.embedding(__a ).view(z.shape ) _UpperCamelCase : Optional[int] = None _UpperCamelCase : Optional[int] = None # compute loss for embedding if not self.legacy: _UpperCamelCase : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: _UpperCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients _UpperCamelCase : Tuple = z + (z_q - z).detach() # reshape back to match original input shape _UpperCamelCase : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: _UpperCamelCase : Any = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis _UpperCamelCase : Optional[int] = self.remap_to_used(__a ) _UpperCamelCase : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: _UpperCamelCase : Optional[int] = 
min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Dict , __a : int ) -> Any: # shape specifying (batch, height, width, channel) if self.remap is not None: _UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis _UpperCamelCase : Optional[int] = self.unmap_to_all(__a ) _UpperCamelCase : Optional[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors _UpperCamelCase : Optional[int] = self.embedding(__a ) if shape is not None: _UpperCamelCase : List[Any] = z_q.view(__a ) # reshape back to match original input shape _UpperCamelCase : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : List[Any]=False ) -> Union[str, Any]: _UpperCamelCase : List[str] = parameters _UpperCamelCase, _UpperCamelCase : Tuple = torch.chunk(__a , 2 , dim=1 ) _UpperCamelCase : Optional[int] = torch.clamp(self.logvar , -30.0 , 20.0 ) _UpperCamelCase : Optional[Any] = deterministic _UpperCamelCase : Tuple = torch.exp(0.5 * self.logvar ) _UpperCamelCase : Tuple = torch.exp(self.logvar ) if self.deterministic: _UpperCamelCase : Optional[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype _UpperCamelCase : Tuple = randn_tensor( self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype ) _UpperCamelCase : Union[str, Any] = self.mean + self.std * sample return x def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str]=None ) -> Tuple: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=[1, 2, 3] ) -> Optional[int]: if self.deterministic: return torch.Tensor([0.0] ) _UpperCamelCase : List[str] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return self.mean
310
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase__ = {"UserAgent": UserAgent().random} def lowercase__ ( lowercase_ ) -> dict: """simple docstring""" _UpperCamelCase : str = script.contents[0] _UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : str ) -> Tuple: _UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/''' _UpperCamelCase : Optional[Any] = self.get_json() def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict: _UpperCamelCase : int = requests.get(self.url , headers=__a ).text _UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ) -> str: return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : str ) -> str: return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: return self.user_data["username"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["full_name"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self.user_data["biography"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["business_email"] @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["external_url"] @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool: return self.user_data["is_verified"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: return self.user_data["is_private"] def lowercase__ ( lowercase_ = "github" ) -> None: """simple docstring""" import os if os.environ.get("CI" ): return # test failing on GitHub Actions _UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data ,lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = InstagramUser("github") print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
310
1
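`DiagonalGaussianDistribution.sample` in the row above is the VAE reparameterization trick: `x = mean + std * eps` with `eps ~ N(0, I)`, which keeps sampling differentiable with respect to the predicted mean and log-variance. A standalone sketch of the same step, including the logvar clamp the class applies:

import torch

def reparameterize(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    logvar = torch.clamp(logvar, -30.0, 20.0)  # same clamp as the class above
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(mean)
    return mean + std * eps

sample = reparameterize(torch.zeros(2, 4), torch.zeros(2, 4))
print(sample.shape)  # torch.Size([2, 4])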
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = False ) -> list[float]: """simple docstring""" if radian_mode: return [magnitude * cos(lowercase_ ), magnitude * sin(lowercase_ )] return [magnitude * cos(radians(lowercase_ ) ), magnitude * sin(radians(lowercase_ ) )] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 10**-1 ) -> bool: """simple docstring""" _UpperCamelCase : NDArray[floataa] = cross(lowercase_ ,lowercase_ ) _UpperCamelCase : float = sum(lowercase_ ) return abs(lowercase_ ) < eps if __name__ == "__main__": # Test to check if it works lowerCamelCase__ = array( [ polar_force(7_1_8.4, 180 - 30), polar_force(8_7_9.5_4, 45), polar_force(100, -90), ] ) lowerCamelCase__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg lowerCamelCase__ = array( [ polar_force(30 * 9.8_1, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) lowerCamelCase__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg lowerCamelCase__ = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) lowerCamelCase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
310
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
310
1
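The constructors in the row above only compute biquad coefficients and hand them to `IIRFilter`. A sketch of the same low-pass response applied with SciPy instead (SciPy availability is an assumption here; note that `lfilter` takes the numerator `b` before the denominator `a`):

from math import cos, sin, sqrt, tau

import numpy as np
from scipy.signal import lfilter

samplerate, frequency, q_factor = 48_000, 1_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b = [(1 - cos(w0)) / 2, 1 - cos(w0), (1 - cos(w0)) / 2]  # same as make_lowpass
a = [1 + alpha, -2 * cos(w0), 1 - alpha]

noise = np.random.default_rng(0).standard_normal(1024)
smoothed = lfilter(b, a, noise)  # high-frequency content is attenuated
print(noise.std(), smoothed.std())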
"""simple docstring""" def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : Dict = [0] * len(lowercase_ ) _UpperCamelCase : Dict = [] _UpperCamelCase : Union[str, Any] = [1] * len(lowercase_ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(lowercase_ ) ): if indegree[i] == 0: queue.append(lowercase_ ) while queue: _UpperCamelCase : int = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: _UpperCamelCase : List[str] = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(lowercase_ ) print(max(lowercase_ ) ) # Adjacency list of Graph lowerCamelCase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
310
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
310
1
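The longest-distance routine in the row above is a Kahn-style topological pass that relaxes one unit per edge. A compact check of the expected answer on its sample graph, using a deque to avoid the O(n) `pop(0)`:

from collections import deque

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
indegree = [0] * len(graph)
for targets in graph.values():
    for t in targets:
        indegree[t] += 1

dist = [1] * len(graph)  # a vertex alone is a path of length 1
queue = deque(v for v in graph if indegree[v] == 0)
while queue:
    v = queue.popleft()
    for t in graph[v]:
        dist[t] = max(dist[t], dist[v] + 1)
        indegree[t] -= 1
        if indegree[t] == 0:
            queue.append(t)

print(max(dist))  # 5, e.g. 0 -> 2 -> 5 -> 6 -> 7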
"""simple docstring""" class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , __a : list[int] ) -> None: _UpperCamelCase : List[str] = len(__a ) _UpperCamelCase : List[Any] = [0] * len_array if len_array > 0: _UpperCamelCase : Union[str, Any] = array[0] for i in range(1 , __a ): _UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i] def __SCREAMING_SNAKE_CASE ( self : str , __a : int , __a : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int ) -> bool: _UpperCamelCase : Optional[Any] = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__a ) return False if __name__ == "__main__": import doctest doctest.testmod()
310
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
310
1
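The prefix-sum class in the row above answers range-sum queries in O(1) after an O(n) build, and `contains_sum` is the classic hash-set scan for a subarray with a given sum. A short usage sketch, assuming the class (with the names restored above) is in scope:

ps = PrefixSum([3, 1, 4, 1, 5, 9])

print(ps.get_sum(0, 5))     # 23, sum of the whole array
print(ps.get_sum(2, 4))     # 10 == 4 + 1 + 5
print(ps.contains_sum(10))  # True: the subarray [4, 1, 5] sums to 10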
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def lowercase__ ( lowercase_ ) -> int: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowercase_ ) def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main _UpperCamelCase : Optional[Any] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(lowercase_ ,id=lowercase_ )
310
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True} SCREAMING_SNAKE_CASE__ :Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : str = {"unk_token": "<unk>"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase : Optional[Any] = "lower newer" _UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : str = tokens + [tokenizer.unk_token] _UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = "lower newer" # Testing tokenization _UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens _UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with 
special tokens _UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token _UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token] _UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input _UpperCamelCase : Optional[int] = "This is a simple input" _UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Dict = ("This is a simple input", "This is a pair") _UpperCamelCase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: _UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _UpperCamelCase : Union[str, Any] = "This is a simple input" _UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"] _UpperCamelCase : str = ("This is a simple input", "This is a pair") _UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id _UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" ) _UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) _UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" ) _UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: _UpperCamelCase : Any = "$$$" _UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) _UpperCamelCase : int = "This is a simple input" _UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id _UpperCamelCase : str = tokenizer(__a ) _UpperCamelCase : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids ) _UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Tuple = "Encode this." _UpperCamelCase : List[str] = "This one too please." 
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a ) encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a ) _UpperCamelCase : int = tokenizer.encode_plus( __a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , ) _UpperCamelCase : str = encoded_sequence_dict["input_ids"] _UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__a ) , len(__a ) ) _UpperCamelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__a ) ] _UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__a , __a ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Any = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) _UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" ) _UpperCamelCase : Optional[Any] = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Union[str, Any] = tokenizer.encode( __a , ) # Same as above self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[str] = "bos" _UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"] _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : List[Any] = tokenizer.encode( __a , ) # We changed the bos token self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _UpperCamelCase : Tuple = tokenizer.encode( __a , ) self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
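# A self-contained sketch of the toy byte-level BPE fixture the test above builds in
# setUp(): the vocab, merges and the "lower newer" example are copied from the test;
# the temp-dir handling here is illustrative and not part of the suite.
import json
import os
import tempfile

from transformers import GPT2Tokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
         "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
         "\u0120wider", "<unk>", "<|endoftext|>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tok = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tok.tokenize("lower newer", add_prefix_space=True))
# expected, per the test above: ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']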
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ = 256 class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = ["melgan"] def __init__( self : Union[str, Any] , __a : SpectrogramNotesEncoder , __a : SpectrogramContEncoder , __a : TaFilmDecoder , __a : DDPMScheduler , __a : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN _UpperCamelCase : Tuple = math.log(1e-5 ) # Matches MelGAN training. _UpperCamelCase : Union[str, Any] = 4.0 # Largest value for most examples _UpperCamelCase : str = 128 self.register_modules( notes_encoder=__a , continuous_encoder=__a , decoder=__a , scheduler=__a , melgan=__a , ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : int=(-1.0, 1.0) , __a : Union[str, Any]=False ) -> int: _UpperCamelCase, _UpperCamelCase : List[str] = output_range if clip: _UpperCamelCase : int = torch.clip(__a , self.min_value , self.max_value ) # Scale to [0, 1]. _UpperCamelCase : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Tuple=(-1.0, 1.0) , __a : Dict=False ) -> Optional[int]: _UpperCamelCase, _UpperCamelCase : int = input_range _UpperCamelCase : Optional[Any] = torch.clip(__a , __a , __a ) if clip else outputs # Scale to [0, 1]. _UpperCamelCase : List[str] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : str , __a : Dict ) -> str: _UpperCamelCase : str = input_tokens > 0 _UpperCamelCase, _UpperCamelCase : List[Any] = self.notes_encoder( encoder_input_tokens=__a , encoder_inputs_mask=__a ) _UpperCamelCase, _UpperCamelCase : Tuple = self.continuous_encoder( encoder_inputs=__a , encoder_inputs_mask=__a ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict , __a : Optional[int] , __a : int ) -> Optional[int]: _UpperCamelCase : List[str] = noise_time if not torch.is_tensor(__a ): _UpperCamelCase : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(__a ) and len(timesteps.shape ) == 0: _UpperCamelCase : Any = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML _UpperCamelCase : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) _UpperCamelCase : Union[str, Any] = self.decoder( encodings_and_masks=__a , decoder_input_tokens=__a , decoder_noise_time=__a ) return logits @torch.no_grad() def __call__( self : Tuple , __a : List[List[int]] , __a : Optional[torch.Generator] = None , __a : int = 100 , __a : bool = True , __a : str = "numpy" , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(__a )}.''' ) _UpperCamelCase : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) _UpperCamelCase : int = np.zeros([1, 0, self.n_dims] , np.floataa ) _UpperCamelCase : Optional[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__a , device=self.device ) for i, encoder_input_tokens in enumerate(__a ): if i == 0: _UpperCamelCase : Optional[int] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. _UpperCamelCase : Union[str, Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__a , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
_UpperCamelCase : Dict = ones _UpperCamelCase : str = self.scale_features( __a , output_range=[-1.0, 1.0] , clip=__a ) _UpperCamelCase : Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__a , continuous_mask=__a , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop _UpperCamelCase : int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=__a , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(__a ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): _UpperCamelCase : Optional[Any] = self.decode( encodings_and_masks=__a , input_tokens=__a , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 _UpperCamelCase : List[str] = self.scheduler.step(__a , __a , __a , generator=__a ).prev_sample _UpperCamelCase : Tuple = self.scale_to_features(__a , input_range=[-1.0, 1.0] ) _UpperCamelCase : Optional[Any] = mel[:1] _UpperCamelCase : int = mel.cpu().float().numpy() _UpperCamelCase : int = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__a , __a ) logger.info("Generated segment" , __a ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." ) if output_type == "numpy": _UpperCamelCase : Dict = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: _UpperCamelCase : List[Any] = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=__a )
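# Hedged usage sketch for the pipeline above, based on the public
# google/music-spectrogram-diffusion release. MidiProcessor's import location varies
# across diffusers versions (the pipeline was later deprecated), and "song.mid" is a
# hypothetical local file.
from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
processor = MidiProcessor()

output = pipe(processor("song.mid"), num_inference_steps=100)
audio = output.audios[0]  # waveform decoded by the MelGAN vocoder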
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = load_tool("text-question-answering" ) self.tool.setup() _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" def lowercase__ ( lowercase_ ,lowercase_ ) -> str: """simple docstring""" if not (isinstance(lowercase_ ,lowercase_ ) and isinstance(lowercase_ ,lowercase_ )): raise ValueError("longest_common_substring() takes two strings for inputs" ) _UpperCamelCase : Union[str, Any] = len(lowercase_ ) _UpperCamelCase : str = len(lowercase_ ) _UpperCamelCase : Any = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _UpperCamelCase : Tuple = 0 _UpperCamelCase : List[Any] = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _UpperCamelCase : Optional[Any] = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _UpperCamelCase : str = i _UpperCamelCase : Union[str, Any] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
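# Hedged usage sketch for the video image processor above. The dump has mangled the
# class name, so `VideoProcessor` below is a hypothetical stand-in for whatever the
# class is actually called; the implementation matches VideoMAE/ViViT-style
# processors, so a list of HxWxC uint8 frames is a valid input.
import numpy as np

video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
processor = VideoProcessor()  # hypothetical name, see note above
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224) with the default 224x224 crop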
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = "M-CLIP" def __init__( self : List[Any] , __a : Dict=1024 , __a : Dict=768 , **__a : int ) -> Any: _UpperCamelCase : Optional[int] = transformerDimSize _UpperCamelCase : Union[str, Any] = imageDimSize super().__init__(**__a ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = MCLIPConfig def __init__( self : int , __a : Optional[int] , *__a : Optional[Any] , **__a : Optional[Any] ) -> Optional[Any]: super().__init__(__a , *__a , **__a ) _UpperCamelCase : Any = XLMRobertaModel(__a ) _UpperCamelCase : Union[str, Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def __SCREAMING_SNAKE_CASE ( self : str , __a : int , __a : str ) -> int: _UpperCamelCase : int = self.transformer(input_ids=__a , attention_mask=__a )[0] _UpperCamelCase : Optional[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__a ), embs
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." 
) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) 
-> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." 
,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" _UpperCamelCase : int = int(number**0.5 ) return number == sq * sq def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> tuple[int, int]: """simple docstring""" _UpperCamelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _UpperCamelCase : int = x_den * y_den * z_den _UpperCamelCase : int = gcd(lowercase_ ,lowercase_ ) top //= hcf bottom //= hcf return top, bottom def lowercase__ ( lowercase_ = 35 ) -> int: """simple docstring""" _UpperCamelCase : set = set() _UpperCamelCase : int _UpperCamelCase : Fraction = Fraction(0 ) _UpperCamelCase : tuple[int, int] for x_num in range(1 ,order + 1 ): for x_den in range(x_num + 1 ,order + 1 ): for y_num in range(1 ,order + 1 ): for y_den in range(y_num + 1 ,order + 1 ): # n=1 _UpperCamelCase : Dict = x_num * y_den + x_den * y_num _UpperCamelCase : List[Any] = x_den * y_den _UpperCamelCase : Optional[int] = gcd(lowercase_ ,lowercase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCamelCase : Union[str, Any] = add_three( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) unique_s.add(lowercase_ ) # n=2 _UpperCamelCase : str = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _UpperCamelCase : Tuple = x_den * x_den * y_den * y_den if is_sq(lowercase_ ) and is_sq(lowercase_ ): _UpperCamelCase : Any = int(sqrt(lowercase_ ) ) _UpperCamelCase : str = int(sqrt(lowercase_ ) ) _UpperCamelCase : Optional[Any] = gcd(lowercase_ ,lowercase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCamelCase : Optional[int] = add_three( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) unique_s.add(lowercase_ ) # n=-1 _UpperCamelCase : str = x_num * y_num _UpperCamelCase : str = x_den * y_num + x_num * y_den _UpperCamelCase : Dict = gcd(lowercase_ ,lowercase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCamelCase : Any = add_three( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) unique_s.add(lowercase_ ) # n=2 _UpperCamelCase : List[str] = x_num * x_num * y_num * y_num _UpperCamelCase : Dict = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(lowercase_ ) and is_sq(lowercase_ ): _UpperCamelCase : List[Any] = int(sqrt(lowercase_ ) ) _UpperCamelCase : str = int(sqrt(lowercase_ ) ) _UpperCamelCase : int = gcd(lowercase_ ,lowercase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCamelCase : List[str] = add_three( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) unique_s.add(lowercase_ ) for num, den in unique_s: total += Fraction(lowercase_ ,lowercase_ ) return total.denominator + total.numerator if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any ) -> int: _UpperCamelCase : int = "" _UpperCamelCase : Tuple = "" _UpperCamelCase : int = [] _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Dict = 256 _UpperCamelCase : str = 0 _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = 0 _UpperCamelCase : str = 0 def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[Any] ) -> Optional[Any]: _UpperCamelCase : Any = cva.imread(__a , 0 ) _UpperCamelCase : Union[str, Any] = copy.deepcopy(self.img ) _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) _UpperCamelCase : Dict = np.sum(__a ) for i in range(len(__a ) ): _UpperCamelCase : Optional[int] = x[i] / self.k self.sk += prk _UpperCamelCase : int = (self.L - 1) * self.sk if self.rem != 0: _UpperCamelCase : Union[str, Any] = int(last % last ) _UpperCamelCase : Dict = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__a ) _UpperCamelCase : List[Any] = int(np.ma.count(self.img ) / self.img[1].size ) _UpperCamelCase : Optional[Any] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): _UpperCamelCase : Any = self.img[j][i] if num != self.last_list[num]: _UpperCamelCase : Dict = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: plt.hist(self.img.ravel() , 256 , [0, 256] ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase__ = os.path.join(os.path.basename(__file__), "image_data/input.jpg") lowerCamelCase__ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: torch.manual_seed(0 ) _UpperCamelCase : Any = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase : Union[str, Any] = self.dummy_uncond_unet _UpperCamelCase : List[Any] = PNDMScheduler() _UpperCamelCase : Any = PNDMPipeline(unet=__a , scheduler=__a ) pndm.to(__a ) pndm.set_progress_bar_config(disable=__a ) _UpperCamelCase : Optional[int] = torch.manual_seed(0 ) _UpperCamelCase : str = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" ).images _UpperCamelCase : int = torch.manual_seed(0 ) _UpperCamelCase : Union[str, Any] = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" , return_dict=__a )[0] _UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] _UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCamelCase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict: _UpperCamelCase : Optional[int] = "google/ddpm-cifar10-32" _UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(__a ) _UpperCamelCase : Optional[Any] = PNDMScheduler() _UpperCamelCase : str = PNDMPipeline(unet=__a , scheduler=__a ) pndm.to(__a ) pndm.set_progress_bar_config(disable=__a ) _UpperCamelCase : Union[str, Any] = torch.manual_seed(0 ) _UpperCamelCase : Tuple = pndm(generator=__a , output_type="numpy" ).images _UpperCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCamelCase : str = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
310
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "rag" SCREAMING_SNAKE_CASE__ :List[str] = True def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any: super().__init__( bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" ) _UpperCamelCase : str = question_encoder_config.pop("model_type" ) _UpperCamelCase : Tuple = kwargs.pop("generator" ) _UpperCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : str = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : Optional[int] = reduce_loss _UpperCamelCase : str = label_smoothing _UpperCamelCase : int = exclude_bos_score _UpperCamelCase : List[str] = do_marginalize _UpperCamelCase : Optional[int] = title_sep _UpperCamelCase : Optional[int] = doc_sep _UpperCamelCase : Union[str, Any] = n_docs _UpperCamelCase : Tuple = max_combined_length _UpperCamelCase : Union[str, Any] = dataset _UpperCamelCase : Any = dataset_split _UpperCamelCase : List[str] = index_name _UpperCamelCase : int = retrieval_vector_size _UpperCamelCase : str = retrieval_batch_size _UpperCamelCase : Dict = passages_path _UpperCamelCase : str = index_path _UpperCamelCase : Tuple = use_dummy_dataset _UpperCamelCase : Union[str, Any] = output_retrieved _UpperCamelCase : Optional[Any] = do_deduplication _UpperCamelCase : str = use_cache if self.forced_eos_token_id is None: _UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: _UpperCamelCase : Dict = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = self.question_encoder.to_dict() _UpperCamelCase : Tuple = self.generator.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
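As the classmethod at the end shows, a RagConfig is normally composed from two sub-configs rather than written by hand; a minimal sketch (the checkpoint names are illustrative, not required):

from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
# wraps both configs and forwards retrieval settings to the keyword arguments documented above
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder, generator, n_docs=5, max_combined_length=300
)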
310
1
"""simple docstring""" from manim import * class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : int = Rectangle(height=0.5 , width=0.5 ) _UpperCamelCase : Optional[Any] = Rectangle(height=0.25 , width=0.25 ) _UpperCamelCase : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _UpperCamelCase : Union[str, Any] = [mem.copy() for i in range(6 )] _UpperCamelCase : List[Any] = [mem.copy() for i in range(6 )] _UpperCamelCase : Any = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : int = VGroup(__a , __a ).arrange(__a , buff=0 ) _UpperCamelCase : List[str] = Text("CPU" , font_size=24 ) _UpperCamelCase : Optional[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__a ) _UpperCamelCase : Optional[int] = [mem.copy() for i in range(4 )] _UpperCamelCase : int = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Union[str, Any] = Text("GPU" , font_size=24 ) _UpperCamelCase : int = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) gpu.move_to([-1, -1, 0] ) self.add(__a ) _UpperCamelCase : int = [mem.copy() for i in range(6 )] _UpperCamelCase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Union[str, Any] = Text("Model" , font_size=24 ) _UpperCamelCase : List[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) model.move_to([3, -1.0, 0] ) self.add(__a ) _UpperCamelCase : List[str] = [] _UpperCamelCase : int = [] _UpperCamelCase : str = [] for i, rect in enumerate(__a ): rect.set_stroke(__a ) _UpperCamelCase : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__a , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__a , buff=0.0 ) self.add(__a ) model_cpu_arr.append(__a ) self.add(*__a , *__a , *__a ) _UpperCamelCase : int = [mem.copy() for i in range(6 )] _UpperCamelCase : Optional[Any] = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Dict = Text("Loaded Checkpoint" , font_size=24 ) _UpperCamelCase : Union[str, Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) checkpoint.move_to([3, 0.5, 0] ) self.add(__a ) _UpperCamelCase : List[str] = [] _UpperCamelCase : str = [] for i, rect in enumerate(__a ): _UpperCamelCase : Union[str, Any] = fill.copy().set_fill(__a , opacity=0.7 ) target.move_to(__a ) ckpt_arr.append(__a ) _UpperCamelCase : Optional[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__a ) self.add(*__a , *__a ) _UpperCamelCase : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCamelCase : Optional[int] = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__a , __a ) _UpperCamelCase : List[Any] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__a ) _UpperCamelCase : Tuple = MarkupText( F'''Based on the passed in configuration, weights are stored in\na 
variety of np.memmaps on disk or to a particular device.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) _UpperCamelCase : int = [meta_mem.copy() for i in range(6 )] _UpperCamelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )] _UpperCamelCase : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 ) _UpperCamelCase : Any = VGroup(__a , __a ).arrange(__a , buff=0 ) _UpperCamelCase : List[Any] = Text("Disk" , font_size=24 ) _UpperCamelCase : List[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__a , run_time=3 ) , Write(__a , run_time=1 ) , Create(__a , run_time=1 ) ) _UpperCamelCase : str = [] for i, rect in enumerate(__a ): _UpperCamelCase : Tuple = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__a , run_time=1.5 ) ) self.play(*__a ) self.play(FadeOut(__a ) ) _UpperCamelCase : Optional[int] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__a , run_time=3 ) ) self.play( FadeOut(__a , __a , *__a , *__a ) , ) self.wait()
310
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int: _UpperCamelCase : Tuple = parent _UpperCamelCase : str = batch_size _UpperCamelCase : Tuple = image_size _UpperCamelCase : List[str] = patch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : List[str] = is_training _UpperCamelCase : Any = use_labels _UpperCamelCase : int = hidden_size _UpperCamelCase : List[Any] = num_hidden_layers _UpperCamelCase : Union[str, Any] = num_attention_heads _UpperCamelCase : Optional[int] = intermediate_size _UpperCamelCase : Any = hidden_act _UpperCamelCase : Dict = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = type_sequence_label_size _UpperCamelCase : int = initializer_range _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Any = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2 _UpperCamelCase : Optional[int] = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Union[str, Any] = None if self.use_labels: _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase : Any = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = ViTModel(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]: _UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCamelCase : Union[str, Any] = 1 _UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : Dict = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int: _UpperCamelCase : Any = self.type_sequence_label_size _UpperCamelCase : Optional[Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : List[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Dict = self.prepare_config_and_inputs() ( ( _UpperCamelCase ), ( _UpperCamelCase ), ( _UpperCamelCase ), ) : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ :Any = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ :str = True SCREAMING_SNAKE_CASE__ :List[Any] = False SCREAMING_SNAKE_CASE__ :int = False SCREAMING_SNAKE_CASE__ :int = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : Dict = ViTModelTester(self ) _UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: pass def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _UpperCamelCase : Any = model_class(__a ) _UpperCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : List[str] = [*signature.parameters.keys()] _UpperCamelCase : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : List[str] = ViTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: _UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a ) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[Any] = prepare_img() _UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : Dict = model(**__a ) # verify the logits _UpperCamelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) _UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a ) _UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a ) # verify the logits _UpperCamelCase : int = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , __a ) _UpperCamelCase : int = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: _UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) _UpperCamelCase : int = self.default_image_processor _UpperCamelCase : Dict = prepare_img() _UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCamelCase : int = model(__a )
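The `interpolate_pos_encoding` test depends on resampling the learned position grid to the new patch grid; a rough reconstruction of that operation (our sketch, not the library's exact code):

import torch
import torch.nn.functional as F

def interpolate_pos_embed(pos_embed: torch.Tensor, new_grid: int) -> torch.Tensor:
    cls_tok, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]  # split the [CLS] slot from patch slots
    old_grid = int(patch_pos.shape[1] ** 0.5)
    dim = patch_pos.shape[-1]
    patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    patch_pos = F.interpolate(patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
    patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
    return torch.cat([cls_tok, patch_pos], dim=1)  # e.g. 480px / 8px patches -> 60x60 grid, 3601 tokens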
310
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[int] = "data2vec-vision" def __init__( self : Union[str, Any] , __a : List[str]=768 , __a : Optional[int]=12 , __a : Union[str, Any]=12 , __a : Optional[Any]=3072 , __a : List[Any]="gelu" , __a : Tuple=0.0 , __a : int=0.0 , __a : Optional[int]=0.02 , __a : Dict=1e-1_2 , __a : int=224 , __a : Optional[int]=16 , __a : Dict=3 , __a : List[Any]=False , __a : List[Any]=False , __a : int=False , __a : List[Any]=False , __a : Optional[Any]=0.1 , __a : List[Any]=0.1 , __a : List[Any]=True , __a : Any=[3, 5, 7, 11] , __a : int=[1, 2, 3, 6] , __a : Dict=True , __a : Optional[Any]=0.4 , __a : Optional[Any]=256 , __a : List[Any]=1 , __a : Any=False , __a : List[Any]=255 , **__a : int , ) -> Any: super().__init__(**__a ) _UpperCamelCase : int = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : Any = intermediate_size _UpperCamelCase : Dict = hidden_act _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : str = initializer_range _UpperCamelCase : int = layer_norm_eps _UpperCamelCase : Any = image_size _UpperCamelCase : List[Any] = patch_size _UpperCamelCase : List[Any] = num_channels _UpperCamelCase : int = use_mask_token _UpperCamelCase : Optional[Any] = use_absolute_position_embeddings _UpperCamelCase : Union[str, Any] = use_relative_position_bias _UpperCamelCase : List[str] = use_shared_relative_position_bias _UpperCamelCase : Tuple = layer_scale_init_value _UpperCamelCase : Union[str, Any] = drop_path_rate _UpperCamelCase : List[Any] = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCamelCase : str = out_indices _UpperCamelCase : Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCamelCase : Tuple = use_auxiliary_head _UpperCamelCase : List[Any] = auxiliary_loss_weight _UpperCamelCase : List[str] = auxiliary_channels _UpperCamelCase : Tuple = auxiliary_num_convs _UpperCamelCase : str = auxiliary_concat_input _UpperCamelCase : Union[str, Any] = semantic_loss_ignore_index class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = version.parse("1.11" ) @property def __SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> float: return 1e-4
310
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[int] = -1 _UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Any = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Optional[int] = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Tuple = TextIteratorStreamer(__a ) _UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Union[str, Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Tuple = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" ) _UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _UpperCamelCase : int = -1 _UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : int = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _UpperCamelCase : List[str] = "" for new_text in streamer: streamer_text += new_text
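A minimal sketch of consuming TextIteratorStreamer outside unittest, e.g. to stream text into a UI; the tiny test checkpoint stands in for a real model.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=10, streamer=streamer)).start()
for chunk in streamer:  # yields decoded text as soon as whole words are ready
    print(chunk, end="", flush=True)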
310
1
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "tensor(bool)": np.bool_, "tensor(int8)": np.inta, "tensor(uint8)": np.uinta, "tensor(int16)": np.intaa, "tensor(uint16)": np.uintaa, "tensor(int32)": np.intaa, "tensor(uint32)": np.uintaa, "tensor(int64)": np.intaa, "tensor(uint64)": np.uintaa, "tensor(float16)": np.floataa, "tensor(float)": np.floataa, "tensor(double)": np.floataa, } class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , __a : Union[str, Any]=None , **__a : Union[str, Any] ) -> Optional[Any]: logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) _UpperCamelCase : Dict = model _UpperCamelCase : Tuple = kwargs.get("model_save_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.get("latest_model_name" , __a ) def __call__( self : Optional[Any] , **__a : Dict ) -> Tuple: _UpperCamelCase : str = {k: np.array(__a ) for k, v in kwargs.items()} return self.model.run(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Path] , __a : str=None , __a : Optional[Any]=None ) -> List[Any]: if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) _UpperCamelCase : Any = "CPUExecutionProvider" return ort.InferenceSession(__a , providers=[provider] , sess_options=__a ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, Path] , __a : Optional[str] = None , **__a : Optional[int] ) -> str: _UpperCamelCase : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME _UpperCamelCase : Dict = self.model_save_dir.joinpath(self.latest_model_name ) _UpperCamelCase : Tuple = Path(__a ).joinpath(__a ) try: shutil.copyfile(__a , __a ) except shutil.SameFileError: pass # copy external weights (for models >2GB) _UpperCamelCase : Any = self.model_save_dir.joinpath(__a ) if src_path.exists(): _UpperCamelCase : int = Path(__a ).joinpath(__a ) try: shutil.copyfile(__a , __a ) except shutil.SameFileError: pass def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, os.PathLike] , **__a : Optional[int] , ) -> Tuple: if os.path.isfile(__a ): logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(__a , exist_ok=__a ) # saving model weights/files self._save_pretrained(__a , **__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : List[Any] , __a : Union[str, Path] , __a : Optional[Union[bool, str, None]] = None , __a : Optional[Union[str, None]] = None , __a : bool = False , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional["ort.SessionOptions"] = None , **__a : Tuple , ) -> Any: _UpperCamelCase : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(__a ): _UpperCamelCase : Dict = OnnxRuntimeModel.load_model( os.path.join(__a , __a ) , provider=__a , sess_options=__a ) _UpperCamelCase : Any = Path(__a ) # load model from hub else: # download model _UpperCamelCase : List[str] = hf_hub_download( repo_id=__a , filename=__a , use_auth_token=__a , revision=__a , cache_dir=__a , force_download=__a , ) _UpperCamelCase : Optional[int] = Path(__a ).parent _UpperCamelCase 
: str = Path(__a ).name _UpperCamelCase : Optional[int] = OnnxRuntimeModel.load_model(__a , provider=__a , sess_options=__a ) return cls(model=__a , **__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : int , __a : Union[str, Path] , __a : bool = True , __a : Optional[str] = None , __a : Optional[str] = None , **__a : Union[str, Any] , ) -> Union[str, Any]: _UpperCamelCase : List[str] = None if len(str(__a ).split("@" ) ) == 2: _UpperCamelCase, _UpperCamelCase : Optional[int] = model_id.split("@" ) return cls._from_pretrained( model_id=__a , revision=__a , cache_dir=__a , force_download=__a , use_auth_token=__a , **__a , )
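A minimal driving sketch for the wrapper above, assuming a local `model.onnx` file exists (the path is a placeholder):

from pathlib import Path
from diffusers import OnnxRuntimeModel

session = OnnxRuntimeModel.load_model("model.onnx")  # a plain onnxruntime.InferenceSession
print([inp.name for inp in session.get_inputs()])    # input names expected as keyword feeds
model = OnnxRuntimeModel(session, model_save_dir=Path("."), latest_model_name="model.onnx")
model.save_pretrained("exported")                    # copies the weights (and any external data) over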
310
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" with open(lowercase_ ) as metadata_file: _UpperCamelCase : Dict = json.load(lowercase_ ) _UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"] # Load the entity vocab file _UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ ) # add an entry for [MASK2] _UpperCamelCase : Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) _UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f: _UpperCamelCase : Tuple = json.load(lowercase_ ) _UpperCamelCase : Optional[int] = "MLukeTokenizer" with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) _UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ ) # Initialize the embeddings of the special tokens _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 ) _UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCamelCase : Optional[Any] = state_dict[bias_name] _UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.''' _UpperCamelCase : List[Any] = state_dict[prefix + matrix_name] _UpperCamelCase : str = state_dict[prefix + matrix_name] _UpperCamelCase : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCamelCase : Tuple = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCamelCase : int = state_dict["entity_predictions.bias"] _UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCamelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCamelCase : Union[str, Any] = state_dict[key] else: _UpperCamelCase : Dict = state_dict[key] _UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if set(lowercase_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowercase_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" ) _UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." _UpperCamelCase : Optional[Any] = (0, 9) _UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : List[str] = model(**lowercase_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 33, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 1, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = "Tokyo is the capital of <mask>." 
_UpperCamelCase : List[Any] = (24, 30) _UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : Optional[Any] = model(**lowercase_ ) _UpperCamelCase : int = encoding["input_ids"][0].tolist() _UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() _UpperCamelCase : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase_ ) ) model.save_pretrained(lowercase_ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"] _UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )] _UpperCamelCase : List[str] = {} for entry in data: _UpperCamelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCamelCase : Dict = entity_id break _UpperCamelCase : Dict = F'''{language}:{entity_name}''' _UpperCamelCase : str = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
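A hedged invocation sketch, using the `convert_luke_checkpoint` name from the `__main__` block; every path is a placeholder. Note that the entity vocab is parsed as JSON lines (one object per line) even though the `--entity_vocab_path` help string says `.tsv`.

convert_luke_checkpoint(
    "mluke_base/pytorch_model.bin",   # --checkpoint_path
    "mluke_base/metadata.json",       # --metadata_path
    "mluke_base/entity_vocab.jsonl",  # --entity_vocab_path (JSON lines)
    "converted_mluke",                # --pytorch_dump_folder_path
    "base",                           # --model_size ("large" branches raise NotImplementedError above)
)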
310
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = "▁" lowerCamelCase__ = {"vocab_file": "sentencepiece.bpe.model"} lowerCamelCase__ = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } lowerCamelCase__ = { "facebook/nllb-200-distilled-600M": 1024, } # fmt: off lowerCamelCase__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ :Union[str, Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE__ :List[int] = [] SCREAMING_SNAKE_CASE__ :List[int] = [] def __init__( self : Optional[int] , __a : Union[str, Any] , __a : 
Tuple="<s>" , __a : List[Any]="</s>" , __a : Optional[int]="</s>" , __a : int="<s>" , __a : str="<unk>" , __a : Dict="<pad>" , __a : str="<mask>" , __a : Dict=None , __a : str=None , __a : Any=None , __a : Optional[Dict[str, Any]] = None , __a : List[str]=None , __a : Any=False , **__a : Dict , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token _UpperCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs _UpperCamelCase : Optional[Any] = legacy_behaviour super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__a , **__a , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__a ) ) _UpperCamelCase : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : Tuple = 1 _UpperCamelCase : Optional[Any] = len(self.sp_model ) _UpperCamelCase : List[Any] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a ) } _UpperCamelCase : List[str] = {v: k for k, v in self.lang_code_to_id.items()} _UpperCamelCase : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _UpperCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _UpperCamelCase : List[Any] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _UpperCamelCase : Any = src_lang if src_lang is not None else "eng_Latn" _UpperCamelCase : List[Any] = self.lang_code_to_id[self._src_lang] _UpperCamelCase : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[Any] ) -> Optional[Any]: _UpperCamelCase : Optional[int] = self.__dict__.copy() _UpperCamelCase : List[str] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : str , __a : int ) -> Any: _UpperCamelCase : str = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _UpperCamelCase : Dict = {} _UpperCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def __SCREAMING_SNAKE_CASE ( self : Any , __a : str ) -> None: _UpperCamelCase : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) _UpperCamelCase : int = [1] * len(self.prefix_tokens ) _UpperCamelCase : Optional[Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__a )) + suffix_ones return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[Any] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Tuple ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) _UpperCamelCase : str = src_lang _UpperCamelCase : Dict = self(__a , add_special_tokens=__a , return_tensors=__a , **__a ) _UpperCamelCase : List[str] = self.convert_tokens_to_ids(__a ) _UpperCamelCase : Optional[Any] = tgt_lang_id return inputs def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: _UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __SCREAMING_SNAKE_CASE ( self : Any , __a : str ) -> List[str]: return self.sp_model.encode(__a , out_type=__a ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] ) -> Tuple: if token in 
self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : Optional[Any] = self.sp_model.PieceToId(__a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[int] ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[str] ) -> Dict: _UpperCamelCase : List[Any] = "".join(__a ).replace(__a , " " ).strip() return out_string def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCamelCase : Optional[Any] = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , "wb" ) as fi: _UpperCamelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str] , __a : str = "eng_Latn" , __a : Optional[List[str]] = None , __a : str = "fra_Latn" , **__a : List[Any] , ) -> BatchEncoding: _UpperCamelCase : str = src_lang _UpperCamelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(__a , __a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int ) -> None: _UpperCamelCase : Optional[Any] = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _UpperCamelCase : List[str] = [] _UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : str = [self.cur_lang_code] _UpperCamelCase : Tuple = [self.eos_token_id] def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str ) -> None: _UpperCamelCase : Tuple = self.lang_code_to_id[lang] if self.legacy_behaviour: _UpperCamelCase : Dict = [] _UpperCamelCase : List[Any] = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : List[str] = [self.cur_lang_code] _UpperCamelCase : Union[str, Any] = [self.eos_token_id]
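End to end, the tokenizer above is used for translation pre- and post-processing; a minimal sketch (the model download is large, checkpoint name as documented above):

from transformers import AutoModelForSeq2SeqLM, NllbTokenizer

tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tok("The rubber duck disobeys the cat.", return_tensors="pt")
# force the first generated token to the target language code, per lang_code_to_id above
generated = model.generate(**inputs, forced_bos_token_id=tok.lang_code_to_id["fra_Latn"])
print(tok.batch_decode(generated, skip_special_tokens=True)[0])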
310
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a ) }
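# The compute method above is a thin wrapper around NLTK; the same number can be
# obtained directly. Toy tokens below, chosen so the counts are easy to check by
# hand: the hypothesis contributes 6 n-grams of order 1-4 and the reference 10,
# and all 6 hypothesis n-grams match, so min(precision, recall) = min(6/6, 6/10) = 0.6.
from nltk.translate import gleu_score

hyp = ["the", "cat", "sat"]
ref = ["the", "cat", "sat", "down"]
score = gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)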
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "funnel" SCREAMING_SNAKE_CASE__ :int = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : Any , __a : Dict=3_0522 , __a : List[Any]=[4, 4, 4] , __a : str=None , __a : int=2 , __a : Union[str, Any]=768 , __a : Dict=12 , __a : Optional[int]=64 , __a : Optional[Any]=3072 , __a : Tuple="gelu_new" , __a : List[str]=0.1 , __a : List[str]=0.1 , __a : Optional[Any]=0.0 , __a : Optional[int]=0.1 , __a : Any=None , __a : str=1e-9 , __a : Union[str, Any]="mean" , __a : Optional[int]="relative_shift" , __a : Any=True , __a : str=True , __a : Union[str, Any]=True , **__a : Optional[int] , ) -> List[Any]: _UpperCamelCase : Optional[int] = vocab_size _UpperCamelCase : Union[str, Any] = block_sizes _UpperCamelCase : Tuple = [1] * len(__a ) if block_repeats is None else block_repeats assert len(__a ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
_UpperCamelCase : List[Any] = num_decoder_layers _UpperCamelCase : Optional[Any] = d_model _UpperCamelCase : Dict = n_head _UpperCamelCase : str = d_head _UpperCamelCase : str = d_inner _UpperCamelCase : str = hidden_act _UpperCamelCase : int = hidden_dropout _UpperCamelCase : Any = attention_dropout _UpperCamelCase : Tuple = activation_dropout _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : str = initializer_std _UpperCamelCase : Any = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' _UpperCamelCase : Dict = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' _UpperCamelCase : Tuple = attention_type _UpperCamelCase : Any = separate_cls _UpperCamelCase : str = truncate_seq _UpperCamelCase : Any = pool_q_only super().__init__(**__a ) @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: return sum(self.block_sizes ) @num_hidden_layers.setter def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] ) -> Union[str, Any]: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: return len(self.block_sizes ) @num_blocks.setter def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict ) -> int: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
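# Usage sketch with hypothetical sizes: the properties above derive
# `num_hidden_layers` as sum(block_sizes) and `num_blocks` as len(block_sizes),
# and both raise on assignment. Mirrored here with plain lists rather than the
# config class:
block_sizes = [4, 4, 4]
num_hidden_layers = sum(block_sizes)  # matches the read-only property: 12
num_blocks = len(block_sizes)         # matches the read-only property: 3
assert (num_hidden_layers, num_blocks) == (12, 3)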
"""simple docstring""" from __future__ import annotations from math import pi def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = ["image_processor"] SCREAMING_SNAKE_CASE__ :Optional[int] = "SamImageProcessor" def __init__( self : Dict , __a : str ) -> Dict: super().__init__(__a ) _UpperCamelCase : int = self.image_processor _UpperCamelCase : str = -10 _UpperCamelCase : int = self.image_processor.size["longest_edge"] def __call__( self : Optional[Any] , __a : int=None , __a : Union[str, Any]=None , __a : Any=None , __a : Tuple=None , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[int] , ) -> BatchEncoding: _UpperCamelCase : List[Any] = self.image_processor( __a , return_tensors=__a , **__a , ) # pop arguments that are not used in the foward but used nevertheless _UpperCamelCase : Any = encoding_image_processor["original_sizes"] if hasattr(__a , "numpy" ): # Checks if Torch or TF tensor _UpperCamelCase : Union[str, Any] = original_sizes.numpy() _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = self._check_and_preprocess_points( input_points=__a , input_labels=__a , input_boxes=__a , ) _UpperCamelCase : Tuple = self._normalize_and_convert( __a , __a , input_points=__a , input_labels=__a , input_boxes=__a , return_tensors=__a , ) return encoding_image_processor def __SCREAMING_SNAKE_CASE ( self : str , __a : List[Any] , __a : Optional[int] , __a : int=None , __a : Union[str, Any]=None , __a : List[Any]=None , __a : List[str]="pt" , ) -> List[str]: if input_points is not None: if len(__a ) != len(__a ): _UpperCamelCase : Union[str, Any] = [ self._normalize_coordinates(self.target_size , __a , original_sizes[0] ) for point in input_points ] else: _UpperCamelCase : Optional[Any] = [ self._normalize_coordinates(self.target_size , __a , __a ) for point, original_size in zip(__a , __a ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: _UpperCamelCase, _UpperCamelCase : Union[str, Any] = self._pad_points_and_labels(__a , __a ) _UpperCamelCase : List[Any] = np.array(__a ) if input_labels is not None: _UpperCamelCase : Dict = np.array(__a ) if input_boxes is not None: if len(__a ) != len(__a ): _UpperCamelCase : str = [ self._normalize_coordinates(self.target_size , __a , original_sizes[0] , is_bounding_box=__a ) for box in input_boxes ] else: _UpperCamelCase : Dict = [ self._normalize_coordinates(self.target_size , __a , __a , is_bounding_box=__a ) for box, original_size in zip(__a , __a ) ] _UpperCamelCase : Any = np.array(__a ) if input_boxes is not None: if return_tensors == "pt": _UpperCamelCase : Any = torch.from_numpy(__a ) # boxes batch size of 1 by default _UpperCamelCase : Tuple = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": _UpperCamelCase : str = tf.convert_to_tensor(__a ) # boxes batch size of 1 by default _UpperCamelCase : int = tf.expand_dims(__a , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes} ) if input_points is not None: if return_tensors == "pt": _UpperCamelCase : List[Any] = torch.from_numpy(__a ) 
# point batch size of 1 by default _UpperCamelCase : List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": _UpperCamelCase : List[Any] = tf.convert_to_tensor(__a ) # point batch size of 1 by default _UpperCamelCase : int = tf.expand_dims(__a , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"input_points": input_points} ) if input_labels is not None: if return_tensors == "pt": _UpperCamelCase : List[Any] = torch.from_numpy(__a ) # point batch size of 1 by default _UpperCamelCase : Tuple = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": _UpperCamelCase : Optional[int] = tf.convert_to_tensor(__a ) # point batch size of 1 by default _UpperCamelCase : Any = tf.expand_dims(__a , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels} ) return encoding_image_processor def __SCREAMING_SNAKE_CASE ( self : Any , __a : Any , __a : Optional[Any] ) -> List[str]: _UpperCamelCase : Optional[Any] = max([point.shape[0] for point in input_points] ) _UpperCamelCase : int = [] for i, point in enumerate(__a ): if point.shape[0] != expected_nb_points: _UpperCamelCase : Optional[Any] = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) _UpperCamelCase : List[str] = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(__a ) _UpperCamelCase : Any = processed_input_points return input_points, input_labels def __SCREAMING_SNAKE_CASE ( self : int , __a : int , __a : np.ndarray , __a : int , __a : Tuple=False ) -> np.ndarray: _UpperCamelCase, _UpperCamelCase : str = original_size _UpperCamelCase, _UpperCamelCase : List[str] = self.image_processor._get_preprocess_shape(__a , longest_edge=__a ) _UpperCamelCase : Optional[Any] = deepcopy(__a ).astype(__a ) if is_bounding_box: _UpperCamelCase : str = coords.reshape(-1 , 2 , 2 ) _UpperCamelCase : List[str] = coords[..., 0] * (new_w / old_w) _UpperCamelCase : List[Any] = coords[..., 1] * (new_h / old_h) if is_bounding_box: _UpperCamelCase : Dict = coords.reshape(-1 , 4 ) return coords def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : str=None , ) -> Any: if input_points is not None: if hasattr(__a , "numpy" ): # Checks for TF or Torch tensor _UpperCamelCase : int = input_points.numpy().tolist() if not isinstance(__a , __a ) or not isinstance(input_points[0] , __a ): raise ValueError("Input points must be a list of list of floating points." ) _UpperCamelCase : Optional[int] = [np.array(__a ) for input_point in input_points] else: _UpperCamelCase : Optional[Any] = None if input_labels is not None: if hasattr(__a , "numpy" ): _UpperCamelCase : str = input_labels.numpy().tolist() if not isinstance(__a , __a ) or not isinstance(input_labels[0] , __a ): raise ValueError("Input labels must be a list of list integers." ) _UpperCamelCase : Optional[Any] = [np.array(__a ) for label in input_labels] else: _UpperCamelCase : List[Any] = None if input_boxes is not None: if hasattr(__a , "numpy" ): _UpperCamelCase : List[Any] = input_boxes.numpy().tolist() if ( not isinstance(__a , __a ) or not isinstance(input_boxes[0] , __a ) or not isinstance(input_boxes[0][0] , __a ) ): raise ValueError("Input boxes must be a list of list of list of floating points." 
) _UpperCamelCase : Optional[int] = [np.array(__a ).astype(np.floataa ) for box in input_boxes] else: _UpperCamelCase : Dict = None return input_points, input_labels, input_boxes @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: _UpperCamelCase : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , *__a : Optional[Any] , **__a : List[str] ) -> Optional[Any]: return self.image_processor.post_process_masks(*__a , **__a )
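# Sketch of the rescaling performed by _normalize_coordinates above: points in
# the original image frame are mapped into the preprocessed frame by scaling x
# with new_w / old_w and y with new_h / old_h. The sizes below are made up.
import numpy as np

old_h, old_w, new_h, new_w = 480, 640, 768, 1024
coords = np.array([[320.0, 240.0]])  # one point at the image centre
scaled = coords * np.array([new_w / old_w, new_h / old_h])
assert np.allclose(scaled, [[512.0, 384.0]])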
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if "://" in dataset_path: _UpperCamelCase : List[Any] = dataset_path.split("://" )[1] return dataset_path def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ ) def lowercase__ ( ) -> None: """simple docstring""" if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase : Dict = None _UpperCamelCase : str = None _UpperCamelCase : str = threading.Lock()
"""simple docstring""" from math import sqrt def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and ( number >= 0 ), "'number' must been an int and positive" _UpperCamelCase : Optional[int] = True # 0 and 1 are none primes. if number <= 1: _UpperCamelCase : str = False for divisor in range(2 ,int(round(sqrt(lowercase_ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: _UpperCamelCase : Any = False break # precondition assert isinstance(lowercase_ ,lowercase_ ), "'status' must been from type bool" return status def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N _UpperCamelCase : Tuple = list(range(2 ,n + 1 ) ) _UpperCamelCase : Dict = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowercase_ ) ): for j in range(i + 1 ,len(lowercase_ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): _UpperCamelCase : List[str] = 0 # filters actual prime numbers. _UpperCamelCase : Union[str, Any] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowercase_ ,lowercase_ ), "'ans' must been from type list" return ans def lowercase__ ( lowercase_ ) -> str: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n > 2), "'N' must been an int and > 2" _UpperCamelCase : Union[str, Any] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 ,n + 1 ): if is_prime(lowercase_ ): ans.append(lowercase_ ) # precondition assert isinstance(lowercase_ ,lowercase_ ), "'ans' must been from type list" return ans def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and number >= 0, "'number' must been an int and >= 0" _UpperCamelCase : int = [] # this list will be returns of the function. # potential prime number factors. 
_UpperCamelCase : str = 2 _UpperCamelCase : List[Any] = number if number == 0 or number == 1: ans.append(lowercase_ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowercase_ ): while quotient != 1: if is_prime(lowercase_ ) and (quotient % factor == 0): ans.append(lowercase_ ) quotient /= factor else: factor += 1 else: ans.append(lowercase_ ) # precondition assert isinstance(lowercase_ ,lowercase_ ), "'ans' must been from type list" return ans def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase : Optional[Any] = 0 # prime factorization of 'number' _UpperCamelCase : str = prime_factorization(lowercase_ ) _UpperCamelCase : Optional[Any] = max(lowercase_ ) # precondition assert isinstance(lowercase_ ,lowercase_ ), "'ans' must been from type int" return ans def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase : Union[str, Any] = 0 # prime factorization of 'number' _UpperCamelCase : Dict = prime_factorization(lowercase_ ) _UpperCamelCase : int = min(lowercase_ ) # precondition assert isinstance(lowercase_ ,lowercase_ ), "'ans' must been from type int" return ans def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ), "'number' must been an int" assert isinstance(number % 2 == 0 ,lowercase_ ), "compare bust been from type bool" return number % 2 == 0 def lowercase__ ( lowercase_ ) -> str: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ), "'number' must been an int" assert isinstance(number % 2 != 0 ,lowercase_ ), "compare bust been from type bool" return number % 2 != 0 def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" assert ( isinstance(lowercase_ ,lowercase_ ) and (number > 2) and is_even(lowercase_ ) ), "'number' must been an int, even and > 2" _UpperCamelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' _UpperCamelCase : Any = get_prime_numbers(lowercase_ ) _UpperCamelCase : int = len(lowercase_ ) # run variable for while-loops. _UpperCamelCase : Optional[Any] = 0 _UpperCamelCase : List[Any] = None # exit variable. for break up the loops _UpperCamelCase : Optional[int] = True while i < len_pn and loop: _UpperCamelCase : Union[str, Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: _UpperCamelCase : List[Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowercase_ ,lowercase_ ) and (len(lowercase_ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowercase__ ( lowercase_ ,lowercase_ ) -> int: """simple docstring""" assert ( isinstance(lowercase_ ,lowercase_ ) and isinstance(lowercase_ ,lowercase_ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
_UpperCamelCase : List[Any] = 0 while numbera != 0: _UpperCamelCase : Any = numbera % numbera _UpperCamelCase : Optional[int] = numbera _UpperCamelCase : Dict = rest # precondition assert isinstance(lowercase_ ,lowercase_ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowercase__ ( lowercase_ ,lowercase_ ) -> Any: """simple docstring""" assert ( isinstance(lowercase_ ,lowercase_ ) and isinstance(lowercase_ ,lowercase_ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." _UpperCamelCase : Tuple = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' _UpperCamelCase : Any = prime_factorization(lowercase_ ) _UpperCamelCase : Optional[Any] = prime_factorization(lowercase_ ) elif numbera == 1 or numbera == 1: _UpperCamelCase : Dict = [] _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : Dict = max(lowercase_ ,lowercase_ ) _UpperCamelCase : Any = 0 _UpperCamelCase : Dict = 0 _UpperCamelCase : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: _UpperCamelCase : Optional[int] = prime_fac_a.count(lowercase_ ) _UpperCamelCase : Optional[Any] = prime_fac_a.count(lowercase_ ) for _ in range(max(lowercase_ ,lowercase_ ) ): ans *= n else: _UpperCamelCase : int = prime_fac_a.count(lowercase_ ) for _ in range(lowercase_ ): ans *= n done.append(lowercase_ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: _UpperCamelCase : Tuple = prime_fac_a.count(lowercase_ ) for _ in range(lowercase_ ): ans *= n done.append(lowercase_ ) # precondition assert isinstance(lowercase_ ,lowercase_ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowercase__ ( lowercase_ ) -> Dict: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n >= 0), "'number' must been a positive int" _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Optional[Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowercase_ ): ans += 1 # precondition assert isinstance(lowercase_ ,lowercase_ ) and is_prime( lowercase_ ), "'ans' must been a prime number and from type int" return ans def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple: """simple docstring""" assert ( is_prime(lowercase_ ) and is_prime(lowercase_ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" _UpperCamelCase : Any = p_number_a + 1 # jump to the next number _UpperCamelCase : int = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowercase_ ): number += 1 while number < p_number_a: ans.append(lowercase_ ) number += 1 # fetch the next prime number. while not is_prime(lowercase_ ): number += 1 # precondition assert ( isinstance(lowercase_ ,lowercase_ ) and ans[0] != p_number_a and ans[len(lowercase_ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n >= 1), "'n' must been int and >= 1" _UpperCamelCase : Tuple = [] # will be returned. 
for divisor in range(1 ,n + 1 ): if n % divisor == 0: ans.append(lowercase_ ) # precondition assert ans[0] == 1 and ans[len(lowercase_ ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowercase__ ( lowercase_ ) -> Dict: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and ( number > 1 ), "'number' must been an int and >= 1" _UpperCamelCase : List[Any] = get_divisors(lowercase_ ) # precondition assert ( isinstance(lowercase_ ,lowercase_ ) and (divisors[0] == 1) and (divisors[len(lowercase_ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" assert ( isinstance(lowercase_ ,lowercase_ ) and isinstance(lowercase_ ,lowercase_ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. _UpperCamelCase : str = gcd(abs(lowercase_ ) ,abs(lowercase_ ) ) # precondition assert ( isinstance(lowercase_ ,lowercase_ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n >= 0), "'n' must been a int and >= 0" _UpperCamelCase : List[Any] = 1 # this will be return. for factor in range(1 ,n + 1 ): ans *= factor return ans def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) and (n >= 0), "'n' must been an int and >= 0" _UpperCamelCase : Dict = 0 _UpperCamelCase : str = 1 _UpperCamelCase : str = 1 # this will be return for _ in range(n - 1 ): _UpperCamelCase : Any = ans ans += fiba _UpperCamelCase : Optional[Any] = tmp return ans
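# Standalone sketch of the Euclidean algorithm that the gcd helper above
# implements by taking remainders until one argument reaches zero:
def euclid_gcd(a, b):
    while b:
        a, b = b, a % b
    return a


assert euclid_gcd(24, 36) == 12
assert euclid_gcd(17, 5) == 1              # coprime inputs
assert 4 * 6 // euclid_gcd(4, 6) == 12     # lcm of 4 and 6, agreeing with the factorization-based helper above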
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowercase__ ( lowercase_ ) -> tuple: """simple docstring""" return (data["data"], data["target"]) def lowercase__ ( lowercase_ ,lowercase_ ) -> XGBClassifier: """simple docstring""" _UpperCamelCase : Optional[Any] = XGBClassifier() classifier.fit(lowercase_ ,lowercase_ ) return classifier def lowercase__ ( ) -> None: """simple docstring""" _UpperCamelCase : Optional[int] = load_iris() _UpperCamelCase, _UpperCamelCase : Union[str, Any] = data_handling(lowercase_ ) _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = train_test_split( lowercase_ ,lowercase_ ,test_size=0.25 ) _UpperCamelCase : List[Any] = iris["target_names"] # Create an XGBoost Classifier from the training data _UpperCamelCase : Optional[int] = xgboost(lowercase_ ,lowercase_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase_ ,lowercase_ ,lowercase_ ,display_labels=lowercase_ ,cmap="Blues" ,normalize="true" ,) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100""" lowerCamelCase__ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowerCamelCase__ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowerCamelCase__ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
"""simple docstring""" import argparse import os import re lowerCamelCase__ = "src/transformers/models/auto" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict lowerCamelCase__ = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict") # re pattern that matches identifiers in mappings lowerCamelCase__ = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"") def lowercase__ ( lowercase_ ,lowercase_ = False ) -> Any: """simple docstring""" with open(lowercase_ ,"r" ,encoding="utf-8" ) as f: _UpperCamelCase : List[str] = f.read() _UpperCamelCase : Dict = content.split("\n" ) _UpperCamelCase : Optional[int] = [] _UpperCamelCase : Optional[Any] = 0 while line_idx < len(lowercase_ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: _UpperCamelCase : List[Any] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 _UpperCamelCase : Dict = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": _UpperCamelCase : Tuple = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers _UpperCamelCase : List[str] = sorted(lowercase_ ,key=lambda lowercase_ : _re_identifier.search(lowercase_ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(lowercase_ ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(lowercase_ ) ) elif "\n".join(lowercase_ ) != content: return True def lowercase__ ( lowercase_ = False ) -> Any: """simple docstring""" _UpperCamelCase : int = [os.path.join(lowercase_ ,lowercase_ ) for f in os.listdir(lowercase_ ) if f.endswith(".py" )] _UpperCamelCase : Optional[int] = [sort_auto_mapping(lowercase_ ,overwrite=lowercase_ ) for fname in fnames] if not overwrite and any(lowercase_ ): _UpperCamelCase : Dict = [f for f, d in zip(lowercase_ ,lowercase_ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(lowercase_ )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") lowerCamelCase__ = parser.parse_args() sort_all_auto_mappings(not args.check_only)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase__ = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) lowerCamelCase__ = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) lowerCamelCase__ = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) lowerCamelCase__ = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) lowerCamelCase__ = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) lowerCamelCase__ = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 
7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) lowerCamelCase__ = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def lowercase__ ( ) -> Tuple: """simple docstring""" _UpperCamelCase, _UpperCamelCase : Any = randrange(len(lowercase_ ) ), randrange(len(lowercase_ ) ) _UpperCamelCase : str = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] _UpperCamelCase, _UpperCamelCase : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase__ ( lowercase_ = 100 ) -> int: """simple docstring""" return (generate_random_hand() for _ in range(lowercase_ )) @pytest.mark.parametrize("hand, expected" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" assert PokerHand(lowercase_ )._is_flush() == expected @pytest.mark.parametrize("hand, expected" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple: """simple docstring""" assert PokerHand(lowercase_ )._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = PokerHand(lowercase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" assert PokerHand(lowercase_ )._is_same_kind() == expected @pytest.mark.parametrize("hand, expected" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" assert PokerHand(lowercase_ )._hand_type == expected @pytest.mark.parametrize("hand, other, expected" ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" assert PokerHand(lowercase_ ).compare_with(PokerHand(lowercase_ ) ) == expected @pytest.mark.parametrize("hand, other, expected" ,generate_random_hands() ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" assert PokerHand(lowercase_ ).compare_with(PokerHand(lowercase_ ) ) == expected def lowercase__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Tuple = [PokerHand(lowercase_ ) for hand in SORTED_HANDS] _UpperCamelCase : Union[str, Any] = poker_hands.copy() shuffle(lowercase_ ) _UpperCamelCase : str = chain(sorted(lowercase_ ) ) for index, hand in enumerate(lowercase_ ): assert hand == poker_hands[index] def lowercase__ ( ) -> Any: """simple docstring""" _UpperCamelCase : Union[str, Any] = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )] pokerhands.sort(reverse=lowercase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Optional[int] = PokerHand("2C 4S AS 3D 5C" ) _UpperCamelCase : str = True _UpperCamelCase : List[Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase__ ( ) -> Any: """simple docstring""" _UpperCamelCase : Optional[Any] = 0 _UpperCamelCase : Tuple = os.path.abspath(os.path.dirname(lowercase_ ) ) _UpperCamelCase : Dict = os.path.join(lowercase_ ,"poker_hands.txt" ) with open(lowercase_ ) as file_hand: for line in file_hand: _UpperCamelCase : 
Optional[int] = line[:14].strip() _UpperCamelCase : Tuple = line[15:].strip() _UpperCamelCase, _UpperCamelCase : Union[str, Any] = PokerHand(lowercase_ ), PokerHand(lowercase_ ) _UpperCamelCase : Optional[Any] = player.compare_with(lowercase_ ) if output == "Win": answer += 1 assert answer == 376
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowerCamelCase__ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex lowerCamelCase__ = 10 lowerCamelCase__ = 256 def lowercase__ ( lowercase_ ) -> Optional[MinHash]: """simple docstring""" if len(lowercase_ ) < MIN_NUM_TOKENS: return None _UpperCamelCase : Any = MinHash(num_perm=lowercase_ ) for token in set(lowercase_ ): min_hash.update(token.encode() ) return min_hash def lowercase__ ( lowercase_ ) -> Set[str]: """simple docstring""" return {t for t in NON_ALPHA.split(lowercase_ ) if len(t.strip() ) > 0} class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , *, __a : float = 0.85 , ) -> int: _UpperCamelCase : List[Any] = duplication_jaccard_threshold _UpperCamelCase : Tuple = NUM_PERM _UpperCamelCase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _UpperCamelCase : Union[str, Any] = defaultdict(__a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : Tuple , __a : MinHash ) -> None: _UpperCamelCase : List[str] = self._index.query(__a ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[List[Dict]]: _UpperCamelCase : Union[str, Any] = [] for base, duplicates in self._duplicate_clusters.items(): _UpperCamelCase : Any = [base] + list(__a ) # reformat the cluster to be a list of dict _UpperCamelCase : Any = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] ) -> None: _UpperCamelCase : Optional[Any] = self.get_duplicate_clusters() with open(__a , "w" ) as f: json.dump(__a , __a ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase, _UpperCamelCase : Any = element _UpperCamelCase : Union[str, Any] = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash ,ThreadedIterator(lowercase_ ,max_queue_size=10_000 ) ,chunksize=100 ,): if data is not None: yield data def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : Any = DuplicationIndex(duplication_jaccard_threshold=lowercase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowercase_ ) ) ,max_queue_size=100 ) ): di.add(lowercase_ ,lowercase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def lowercase__ ( lowercase_ ,lowercase_ ) -> float: """simple docstring""" _UpperCamelCase : Optional[int] = get_tokens(lowercase_ ) _UpperCamelCase : Any = get_tokens(lowercase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowerCamelCase__ = None def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Any = [] for elementa in cluster: _UpperCamelCase : Any = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: _UpperCamelCase : Dict = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(lowercase_ ,lowercase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: _UpperCamelCase : Tuple = 1 extremes.append(lowercase_ ) return extremes def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" global _shared_dataset _UpperCamelCase : List[str] = dataset _UpperCamelCase : int = [] _UpperCamelCase : List[str] = partial(_find_cluster_extremes_shared ,jaccard_threshold=lowercase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowercase_ ,lowercase_ ,) ,total=len(lowercase_ ) ,): extremes_list.append(lowercase_ ) return extremes_list def lowercase__ ( lowercase_ ,lowercase_ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: """simple docstring""" _UpperCamelCase : List[str] = make_duplicate_clusters(lowercase_ ,lowercase_ ) _UpperCamelCase : List[Any] = {x["base_index"] for cluster in duplicate_clusters for x in cluster} _UpperCamelCase : Any = {} _UpperCamelCase : Union[str, Any] = find_extremes(lowercase_ ,lowercase_ ,lowercase_ ) for extremes in extremes_clusters: for element in extremes: _UpperCamelCase : Dict = element _UpperCamelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() ) _UpperCamelCase : Dict = dataset.filter(lambda lowercase_ ,lowercase_ : idx not in remove_indices ,with_indices=lowercase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _UpperCamelCase : List[str] = element["base_index"] in extreme_dict if element["is_extreme"]: _UpperCamelCase : Any = extreme_dict[element["base_index"]]["copies"] print(F'''Original dataset size: {len(lowercase_ )}''' ) print(F'''Number of duplicate clusters: {len(lowercase_ )}''' ) print(F'''Files in duplicate cluster: {len(lowercase_ )}''' ) print(F'''Unique files in duplicate cluster: {len(lowercase_ )}''' ) print(F'''Filtered dataset size: {len(lowercase_ )}''' ) return ds_filter, duplicate_clusters
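For orientation, a self-contained sketch of the datasketch primitives the duplication index above is built on; the token sets and the 0.85 threshold here are illustrative assumptions, not values read from a real dataset:

from datasketch import MinHash, MinHashLSH

def min_hash_of(tokens: set, num_perm: int = 256) -> MinHash:
    # Mirrors the hashing above: one update per unique token.
    m = MinHash(num_perm=num_perm)
    for t in tokens:
        m.update(t.encode())
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=256)
tokens_a = {"def", "add", "a", "b", "return", "x", "y", "z", "sum", "val"}
tokens_b = tokens_a | {"print"}          # Jaccard(a, b) = 10/11, about 0.91
lsh.insert("file_a", min_hash_of(tokens_a))
print(lsh.query(min_hash_of(tokens_b)))  # likely ["file_a"]; LSH is probabilistic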
310
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase__ = {"UserAgent": UserAgent().random} def lowercase__ ( lowercase_ ) -> dict: """simple docstring""" _UpperCamelCase : str = script.contents[0] _UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : str ) -> Tuple: _UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/''' _UpperCamelCase : Optional[Any] = self.get_json() def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict: _UpperCamelCase : int = requests.get(self.url , headers=__a ).text _UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ) -> str: return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : str ) -> str: return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: return self.user_data["username"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["full_name"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self.user_data["biography"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["business_email"] @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["external_url"] @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool: return self.user_data["is_verified"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: return self.user_data["is_private"] def lowercase__ ( lowercase_ = "github" ) -> None: """simple docstring""" import os if os.environ.get("CI" ): return # test failing on GitHub Actions _UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data ,lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = InstagramUser("github") print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
310
1
"""simple docstring""" import math def lowercase__ ( lowercase_ ,lowercase_ ) -> float: """simple docstring""" if ( not isinstance(lowercase_ ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def lowercase__ ( lowercase_ ,lowercase_ ) -> float: """simple docstring""" if ( not isinstance(lowercase_ ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
310
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
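For orientation, a self-contained sketch of how a second-order section like the ones above is applied to a sample stream (direct form I); the coefficients reproduce the lowpass formulas at the top of the file, and the 440 Hz / 48 kHz values are illustrative:

from math import cos, sin, sqrt, tau

frequency, samplerate, q_factor = 440.0, 48_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b0 = b2 = (1 - cos(w0)) / 2   # lowpass numerator, as in the first factory above
b1 = 1 - cos(w0)
a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha

x1 = x2 = y1 = y2 = 0.0

def process(x0: float) -> float:
    # y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
    global x1, x2, y1, y2
    y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
    x1, x2, y1, y2 = x0, x1, y0, y1
    return y0

print([round(process(s), 6) for s in (1.0, 0.0, 0.0, 0.0)])  # head of the impulse response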
310
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Union[str, Any] = "transfo-xl" SCREAMING_SNAKE_CASE__ :Union[str, Any] = ["mems"] SCREAMING_SNAKE_CASE__ :List[Any] = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , __a : Any=26_7735 , __a : Union[str, Any]=[2_0000, 4_0000, 20_0000] , __a : int=1024 , __a : Tuple=1024 , __a : List[Any]=16 , __a : Dict=64 , __a : Dict=4096 , __a : Tuple=4 , __a : Optional[Any]=False , __a : Dict=18 , __a : List[str]=1600 , __a : Dict=1000 , __a : Dict=True , __a : List[Any]=True , __a : Dict=0 , __a : List[Any]=-1 , __a : str=True , __a : List[str]=0.1 , __a : Tuple=0.0 , __a : Dict=True , __a : Tuple="normal" , __a : str=0.01 , __a : Any=0.01 , __a : Union[str, Any]=0.02 , __a : Any=1e-5 , __a : Any=0 , **__a : int , ) -> Optional[int]: _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : List[str] = [] self.cutoffs.extend(__a ) if proj_share_all_but_first: _UpperCamelCase : Optional[Any] = [False] + [True] * len(self.cutoffs ) else: _UpperCamelCase : List[str] = [False] + [False] * len(self.cutoffs ) _UpperCamelCase : Optional[int] = d_model _UpperCamelCase : int = d_embed _UpperCamelCase : Union[str, Any] = d_head _UpperCamelCase : str = d_inner _UpperCamelCase : List[str] = div_val _UpperCamelCase : Union[str, Any] = pre_lnorm _UpperCamelCase : Any = n_layer _UpperCamelCase : Union[str, Any] = n_head _UpperCamelCase : Union[str, Any] = mem_len _UpperCamelCase : Tuple = same_length _UpperCamelCase : Dict = attn_type _UpperCamelCase : str = clamp_len _UpperCamelCase : str = sample_softmax _UpperCamelCase : Optional[int] = adaptive _UpperCamelCase : Tuple = dropout _UpperCamelCase : Tuple = dropatt _UpperCamelCase : int = untie_r _UpperCamelCase : Optional[int] = init _UpperCamelCase : List[str] = init_range _UpperCamelCase : Dict = proj_init_std _UpperCamelCase : Dict = init_std _UpperCamelCase : Dict = layer_norm_epsilon super().__init__(eos_token_id=__a , **__a ) @property def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Message copied from Transformer-XL documentation logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, Any] ) -> str: # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
310
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
310
1
"""simple docstring""" import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=1_024 ,lowercase_=1_024 ,lowercase_=False ,**lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : Any = AutoTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = SeqaSeqDataset(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,type_path="train" ,**lowercase_ ) _UpperCamelCase : Dict = tok.pad_token_id def get_lens(lowercase_ ): _UpperCamelCase : Optional[int] = tqdm( DataLoader(lowercase_ ,batch_size=512 ,num_workers=8 ,shuffle=lowercase_ ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,) _UpperCamelCase : Optional[int] = [] for batch in dl: _UpperCamelCase : Any = batch["input_ids"].ne(lowercase_ ).sum(1 ).tolist() _UpperCamelCase : Any = batch["labels"].ne(lowercase_ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowercase_ ,lowercase_ ): max_lens.append(max(lowercase_ ,lowercase_ ) ) else: max_lens.extend(lowercase_ ) return max_lens _UpperCamelCase : Optional[Any] = get_lens(lowercase_ ) _UpperCamelCase : List[Any] = SeqaSeqDataset(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,type_path="val" ,**lowercase_ ) _UpperCamelCase : Any = get_lens(lowercase_ ) pickle_save(lowercase_ ,train_ds.len_file ) pickle_save(lowercase_ ,val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
310
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "rag" SCREAMING_SNAKE_CASE__ :List[str] = True def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any: super().__init__( bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" ) _UpperCamelCase : str = question_encoder_config.pop("model_type" ) _UpperCamelCase : Tuple = kwargs.pop("generator" ) _UpperCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : str = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : Optional[int] = reduce_loss _UpperCamelCase : str = label_smoothing _UpperCamelCase : int = exclude_bos_score _UpperCamelCase : List[str] = do_marginalize _UpperCamelCase : Optional[int] = title_sep _UpperCamelCase : Optional[int] = doc_sep _UpperCamelCase : Union[str, Any] = n_docs _UpperCamelCase : Tuple = max_combined_length _UpperCamelCase : Union[str, Any] = dataset _UpperCamelCase : Any = dataset_split _UpperCamelCase : List[str] = index_name _UpperCamelCase : int = retrieval_vector_size _UpperCamelCase : str = retrieval_batch_size _UpperCamelCase : Dict = passages_path _UpperCamelCase : str = index_path _UpperCamelCase : Tuple = use_dummy_dataset _UpperCamelCase : Union[str, Any] = output_retrieved _UpperCamelCase : Optional[Any] = do_deduplication _UpperCamelCase : str = use_cache if self.forced_eos_token_id is None: _UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: _UpperCamelCase : Dict = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = self.question_encoder.to_dict() _UpperCamelCase : Tuple = self.generator.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
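A minimal sketch of building the composite config above from two sub-configs; from_question_encoder_generator_configs is the conventional name of the classmethod near the end of the class, and the DPR/BART pairing is the usual RAG choice, assumed here rather than read from this file:

from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder, generator, n_docs=5
)
print(rag_config.n_docs, rag_config.index_name)  # 5 compressed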
310
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True} SCREAMING_SNAKE_CASE__ :Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : str = {"unk_token": "<unk>"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase : Optional[Any] = "lower newer" _UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : str = tokens + [tokenizer.unk_token] _UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = "lower newer" # Testing tokenization _UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens _UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with 
special tokens _UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token _UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token] _UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input _UpperCamelCase : Optional[int] = "This is a simple input" _UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Dict = ("This is a simple input", "This is a pair") _UpperCamelCase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: _UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _UpperCamelCase : Union[str, Any] = "This is a simple input" _UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"] _UpperCamelCase : str = ("This is a simple input", "This is a pair") _UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id _UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" ) _UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) _UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" ) _UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: _UpperCamelCase : Any = "$$$" _UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) _UpperCamelCase : int = "This is a simple input" _UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id _UpperCamelCase : str = tokenizer(__a ) _UpperCamelCase : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids ) _UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Tuple = "Encode this." _UpperCamelCase : List[str] = "This one too please." 
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a ) encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a ) _UpperCamelCase : int = tokenizer.encode_plus( __a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , ) _UpperCamelCase : str = encoded_sequence_dict["input_ids"] _UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__a ) , len(__a ) ) _UpperCamelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__a ) ] _UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__a , __a ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Any = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) _UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" ) _UpperCamelCase : Optional[Any] = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Union[str, Any] = tokenizer.encode( __a , ) # Same as above self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[str] = "bos" _UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"] _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : List[Any] = tokenizer.encode( __a , ) # We changed the bos token self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _UpperCamelCase : Tuple = tokenizer.encode( __a , ) self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
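A minimal sketch of the tokenizer behavior the tests above target, using the released gpt2 checkpoint instead of the toy vocab (the model id and the exact BPE pieces shown are assumptions):

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
print(tok.tokenize("lower newer"))      # e.g. ['Ġlower', 'Ġnewer']
print(tok("lower newer")["input_ids"])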
310
1
"""simple docstring""" from __future__ import annotations import requests lowerCamelCase__ = set( "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split() ) def lowercase__ ( lowercase_ ,lowercase_ = 1 ,lowercase_ = "new" ,lowercase_ = None ) -> dict: """simple docstring""" _UpperCamelCase : str = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): _UpperCamelCase : List[Any] = F'''Invalid search term: {invalid_search_terms}''' raise ValueError(lowercase_ ) _UpperCamelCase : List[Any] = requests.get( F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' ,headers={"User-agent": "A random string"} ,) if response.status_code == 429: raise requests.HTTPError _UpperCamelCase : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} _UpperCamelCase : List[str] = {} for id_ in range(lowercase_ ): _UpperCamelCase : Dict = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
310
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = load_tool("text-question-answering" ) self.tool.setup() _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" )
310
1
"""simple docstring""" def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" assert x is not None assert y is not None _UpperCamelCase : List[Any] = len(lowercase_ ) _UpperCamelCase : List[Any] = len(lowercase_ ) # declaring the array for storing the dp values _UpperCamelCase : Union[str, Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 ,m + 1 ): for j in range(1 ,n + 1 ): _UpperCamelCase : int = 1 if x[i - 1] == y[j - 1] else 0 _UpperCamelCase : Dict = max(l[i - 1][j] ,l[i][j - 1] ,l[i - 1][j - 1] + match ) _UpperCamelCase : int = "" _UpperCamelCase, _UpperCamelCase : Optional[int] = m, n while i > 0 and j > 0: _UpperCamelCase : Optional[Any] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: _UpperCamelCase : Tuple = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": lowerCamelCase__ = "AGGTAB" lowerCamelCase__ = "GXTXAYB" lowerCamelCase__ = 4 lowerCamelCase__ = "GTAB" lowerCamelCase__ , lowerCamelCase__ = longest_common_subsequence(a, b) print("len =", ln, ", sub-sequence =", subseq) import doctest doctest.testmod()
310
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
310
1
"""simple docstring""" from typing import Any class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any] , __a : Any ) -> Optional[Any]: _UpperCamelCase : int = data _UpperCamelCase : Union[str, Any] = None def __repr__( self : List[Any] ) -> str: return F'''Node({self.data})''' class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int ) -> List[Any]: _UpperCamelCase : Tuple = None def __iter__( self : Union[str, Any] ) -> Any: _UpperCamelCase : List[str] = self.head while node: yield node.data _UpperCamelCase : Union[str, Any] = node.next def __len__( self : Any ) -> int: return sum(1 for _ in self ) def __repr__( self : List[str] ) -> str: return "->".join([str(__a ) for item in self] ) def __getitem__( self : Optional[Any] , __a : int ) -> Any: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self : Dict , __a : int , __a : Any ) -> None: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) _UpperCamelCase : List[str] = self.head for _ in range(__a ): _UpperCamelCase : str = current.next _UpperCamelCase : Any = data def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> None: self.insert_nth(len(self ) , __a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any ) -> None: self.insert_nth(0 , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : int , __a : Any ) -> None: if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) _UpperCamelCase : Any = Node(__a ) if self.head is None: _UpperCamelCase : int = new_node elif index == 0: _UpperCamelCase : List[str] = self.head # link new_node to head _UpperCamelCase : List[Any] = new_node else: _UpperCamelCase : List[str] = self.head for _ in range(index - 1 ): _UpperCamelCase : List[Any] = temp.next _UpperCamelCase : Any = temp.next _UpperCamelCase : List[Any] = new_node def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: # print every node data print(self ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: return self.delete_nth(0 ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) _UpperCamelCase : str = self.head # default first node if index == 0: _UpperCamelCase : Dict = self.head.next else: _UpperCamelCase : Tuple = self.head for _ in range(index - 1 ): _UpperCamelCase : Tuple = temp.next _UpperCamelCase : Any = temp.next _UpperCamelCase : Optional[Any] = temp.next.next return delete_node.data def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> bool: return self.head is None def __SCREAMING_SNAKE_CASE ( self : Dict ) -> None: _UpperCamelCase : Union[str, Any] = None _UpperCamelCase : Dict = self.head while current: # Store the current node's next node. 
_UpperCamelCase : List[Any] = current.next # Make the current node's next point backwards _UpperCamelCase : Tuple = prev # Make the previous node be the current node _UpperCamelCase : Dict = current # Make the current node the next node (to progress iteration) _UpperCamelCase : Optional[int] = next_node # Return prev in order to put the head at the end _UpperCamelCase : int = prev def lowercase__ ( ) -> None: """simple docstring""" _UpperCamelCase : Tuple = LinkedList() assert linked_list.is_empty() is True assert str(lowercase_ ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(lowercase_ ) == i linked_list.insert_nth(lowercase_ ,i + 1 ) assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(lowercase_ ) == 9 assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): _UpperCamelCase : Optional[int] = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(-8 ,1 ) ) def lowercase__ ( ) -> None: """simple docstring""" _UpperCamelCase : Optional[int] = [ -9, 100, Node(77_345_112 ), "dlrow olleH", 7, 5_555, 0, -192.5_5555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] _UpperCamelCase : Union[str, Any] = LinkedList() for i in test_input: linked_list.insert_tail(lowercase_ ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(lowercase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head _UpperCamelCase : Optional[int] = linked_list.delete_head() assert result == -9 assert ( str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail _UpperCamelCase : Union[str, Any] = linked_list.delete_tail() assert result == 12.2 assert ( str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list _UpperCamelCase : int = linked_list.delete_nth(10 ) assert result is None assert ( str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" 
) ) assert ( str(lowercase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(lowercase_ ) assert ( str(lowercase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(lowercase_ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__ ( ) -> Tuple: """simple docstring""" from doctest import testmod testmod() _UpperCamelCase : Union[str, Any] = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(lowercase_ ) print("\nReading/changing Node data using indexing:" ) print(F'''Element at Position 1: {linked_list[1]}''' ) _UpperCamelCase : Union[str, Any] = input("Enter New Value: " ).strip() print("New list:" ) print(lowercase_ ) print(F'''length of linked_list is : {len(lowercase_ )}''' ) if __name__ == "__main__": main()
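# A minimal usage sketch of the Node/LinkedList classes above, separate from the
# interactive main(); everything here exercises only methods defined in this file.
demo_list = LinkedList()
for value in ("a", "b", "c"):
    demo_list.insert_tail(value)
assert str(demo_list) == "a->b->c"
demo_list.insert_head("z")
demo_list.reverse()
assert str(demo_list) == "c->b->a->z"
assert len(demo_list) == 4
assert demo_list.delete_nth(1) == "b"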
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
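# A standalone re-statement of the make_batched helper above, using np.ndarray as the
# only "valid image" type so the nesting rule can be checked without the transformers
# image utilities; the name make_batched_sketch is ours, not the library's.
import numpy as np


def make_batched_sketch(videos):
    if (
        isinstance(videos, (list, tuple))
        and isinstance(videos[0], (list, tuple))
        and isinstance(videos[0][0], np.ndarray)
    ):
        return videos  # already a batch of videos
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], np.ndarray):
        return [videos]  # a single video: wrap into a batch of one
    if isinstance(videos, np.ndarray):
        return [[videos]]  # a single frame: wrap into a one-frame video, then a batch
    raise ValueError(f"Could not make batched video from {videos}")


frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched_sketch(frame)) == 1
assert len(make_batched_sketch([frame, frame])) == 1
assert len(make_batched_sketch([[frame], [frame]])) == 2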
"""simple docstring""" def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> float: """simple docstring""" _UpperCamelCase : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowercase__ ( ) -> List[Any]: """simple docstring""" print(sum_of_series(1 ,1 ,10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." 
) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) 
-> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." 
,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase__ = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _UpperCamelCase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _UpperCamelCase : Dict = 4 _UpperCamelCase : Tuple = 48 _UpperCamelCase : Optional[Any] = "pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _UpperCamelCase : List[str] = [6, 6, 6, 6] _UpperCamelCase : Dict = 60 _UpperCamelCase : List[Any] = [6, 6, 6, 6] _UpperCamelCase : Union[str, Any] = "pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _UpperCamelCase : List[Any] = 4 _UpperCamelCase : List[str] = "nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _UpperCamelCase : int = 1 _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = 126 _UpperCamelCase : int = 7 _UpperCamelCase : Union[str, Any] = 255.0 _UpperCamelCase : str = "" return config def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: _UpperCamelCase : Any = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: _UpperCamelCase : str = name.replace("patch_embed.norm" ,"embeddings.patch_embeddings.layernorm" ) if "layers" in name: _UpperCamelCase : List[Any] = name.replace("layers" ,"encoder.stages" ) if "residual_group.blocks" in name: _UpperCamelCase : List[str] = name.replace("residual_group.blocks" ,"layers" ) if "attn.proj" in name: _UpperCamelCase : List[str] = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name: _UpperCamelCase : List[Any] = name.replace("attn" ,"attention.self" ) if "norm1" in name: _UpperCamelCase : Optional[int] = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: _UpperCamelCase : Optional[Any] = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: _UpperCamelCase : int = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: _UpperCamelCase : Tuple = name.replace("mlp.fc2" ,"output.dense" ) if "q_bias" in name: _UpperCamelCase : Optional[Any] = name.replace("q_bias" ,"query.bias" ) if "k_bias" in name: _UpperCamelCase : str = name.replace("k_bias" ,"key.bias" ) if "v_bias" in name: _UpperCamelCase : Optional[Any] = name.replace("v_bias" ,"value.bias" ) if "cpb_mlp" in name: _UpperCamelCase : int = name.replace("cpb_mlp" ,"continuous_position_bias_mlp" ) if "patch_embed.proj" in name: _UpperCamelCase : List[Any] = name.replace("patch_embed.proj" ,"patch_embed.projection" ) if name == "norm.weight": _UpperCamelCase : Any = "layernorm.weight" if name == "norm.bias": _UpperCamelCase : Any = "layernorm.bias" if "conv_first" in name: _UpperCamelCase : int = name.replace("conv_first" ,"first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _UpperCamelCase : Optional[Any] = name.replace("conv_last" ,"final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _UpperCamelCase : Tuple = 
name.replace("conv_before_upsample.0" ,"conv_before_upsample" ) if "upsample.0" in name: _UpperCamelCase : Union[str, Any] = name.replace("upsample.0" ,"upsample.convolution_0" ) if "upsample.2" in name: _UpperCamelCase : Tuple = name.replace("upsample.2" ,"upsample.convolution_1" ) _UpperCamelCase : Optional[int] = "upsample." + name elif config.upsampler == "pixelshuffledirect": _UpperCamelCase : int = name.replace("upsample.0.weight" ,"upsample.conv.weight" ) _UpperCamelCase : Dict = name.replace("upsample.0.bias" ,"upsample.conv.bias" ) else: pass else: _UpperCamelCase : str = "swin2sr." + name return name def lowercase__ ( lowercase_ ,lowercase_ ) -> List[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): _UpperCamelCase : List[Any] = orig_state_dict.pop(lowercase_ ) if "qkv" in key: _UpperCamelCase : str = key.split("." ) _UpperCamelCase : List[str] = int(key_split[1] ) _UpperCamelCase : Optional[int] = int(key_split[4] ) _UpperCamelCase : Tuple = config.embed_dim if "weight" in key: _UpperCamelCase : List[str] = val[:dim, :] _UpperCamelCase : int = val[dim : dim * 2, :] _UpperCamelCase : List[str] = val[-dim:, :] else: _UpperCamelCase : int = val[:dim] _UpperCamelCase : str = val[dim : dim * 2] _UpperCamelCase : Union[str, Any] = val[-dim:] pass else: _UpperCamelCase : int = val return orig_state_dict def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = get_config(lowercase_ ) _UpperCamelCase : Optional[Any] = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() _UpperCamelCase : List[str] = torch.hub.load_state_dict_from_url(lowercase_ ,map_location="cpu" ) _UpperCamelCase : Optional[Any] = convert_state_dict(lowercase_ ,lowercase_ ) _UpperCamelCase, _UpperCamelCase : Dict = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("Missing keys when converting: {}".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'''Unexpected key {key} in state_dict''' ) # verify values _UpperCamelCase : Any = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" _UpperCamelCase : Any = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ).convert("RGB" ) _UpperCamelCase : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _UpperCamelCase : List[Any] = 126 if "Jpeg" in checkpoint_url else 256 _UpperCamelCase : List[Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ), ] ) _UpperCamelCase : Union[str, Any] = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: _UpperCamelCase : Union[str, Any] = pixel_values[:, 0, :, :].unsqueeze(1 ) _UpperCamelCase : Any = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _UpperCamelCase : Dict = torch.Size([1, 3, 512, 512] ) _UpperCamelCase : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _UpperCamelCase : List[Any] = torch.Size([1, 3, 1_024, 1_024] ) _UpperCamelCase : List[str] = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here 
_UpperCamelCase : Union[str, Any] = torch.Size([1, 3, 1_024, 1_024] ) _UpperCamelCase : List[Any] = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _UpperCamelCase : Tuple = torch.Size([1, 3, 512, 512] ) _UpperCamelCase : List[str] = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _UpperCamelCase : Optional[int] = torch.Size([1, 3, 1_024, 1_024] ) _UpperCamelCase : Dict = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,lowercase_ ,atol=1e-3 ) print("Looks ok!" ) _UpperCamelCase : str = { "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } _UpperCamelCase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'''caidas/{model_name}''' ) processor.push_to_hub(F'''caidas/{model_name}''' ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") lowerCamelCase__ = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
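# A hedged sketch of how the script obtains the original weights: torch.hub downloads
# and caches the state dict from one of the release URLs listed above.
import torch

url = "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth"
state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
print(f"{len(state_dict)} entries in the original checkpoint")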
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[str] = b.T _UpperCamelCase : Tuple = np.sum(np.square(lowercase_ ) ,axis=1 ) _UpperCamelCase : List[str] = np.sum(np.square(lowercase_ ) ,axis=0 ) _UpperCamelCase : Union[str, Any] = np.matmul(lowercase_ ,lowercase_ ) _UpperCamelCase : Tuple = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase__ ( lowercase_ ,lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : str = x.reshape(-1 ,3 ) _UpperCamelCase : Optional[Any] = squared_euclidean_distance(lowercase_ ,lowercase_ ) return np.argmin(lowercase_ ,axis=1 ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Union[str, Any] = ["pixel_values"] def __init__( self : List[str] , __a : Optional[Union[List[List[int]], np.ndarray]] = None , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : bool = True , **__a : Optional[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Optional[Any] = size if size is not None else {"height": 256, "width": 256} _UpperCamelCase : str = get_size_dict(__a ) _UpperCamelCase : List[Any] = np.array(__a ) if clusters is not None else None _UpperCamelCase : Optional[Any] = do_resize _UpperCamelCase : Optional[int] = size _UpperCamelCase : Tuple = resample _UpperCamelCase : Dict = do_normalize _UpperCamelCase : Dict = do_color_quantize def __SCREAMING_SNAKE_CASE ( self : str , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dictionary must contain both height and width keys. 
Got {size.keys()}''' ) return resize( __a , size=(size["height"], size["width"]) , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : np.ndarray , __a : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray: _UpperCamelCase : Any = rescale(image=__a , scale=1 / 1_27.5 , data_format=__a ) _UpperCamelCase : str = image - 1 return image def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Optional[bool] = None , __a : Optional[Union[List[List[int]], np.ndarray]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__a : str , ) -> PIL.Image.Image: _UpperCamelCase : Any = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : str = size if size is not None else self.size _UpperCamelCase : str = get_size_dict(__a ) _UpperCamelCase : Optional[Any] = resample if resample is not None else self.resample _UpperCamelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize _UpperCamelCase : List[str] = clusters if clusters is not None else self.clusters _UpperCamelCase : Dict = np.array(__a ) _UpperCamelCase : Optional[Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. _UpperCamelCase : Union[str, Any] = [to_numpy_array(__a ) for image in images] if do_resize: _UpperCamelCase : Optional[int] = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_normalize: _UpperCamelCase : str = [self.normalize(image=__a ) for image in images] if do_color_quantize: _UpperCamelCase : str = [to_channel_dimension_format(__a , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) _UpperCamelCase : List[Any] = np.array(__a ) _UpperCamelCase : List[str] = color_quantize(__a , __a ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) _UpperCamelCase : List[str] = images.shape[0] _UpperCamelCase : str = images.reshape(__a , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. _UpperCamelCase : Optional[Any] = list(__a ) else: _UpperCamelCase : str = [to_channel_dimension_format(__a , __a ) for image in images] _UpperCamelCase : Optional[Any] = {"input_ids": images} return BatchFeature(data=__a , tensor_type=__a )
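# A standalone re-statement of the colour quantization above: compute squared
# Euclidean distances from each pixel to each palette cluster, then take the argmin.
# The 16-colour palette here is random and purely illustrative.
import numpy as np


def squared_euclidean_distance_sketch(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    return a2[:, None] - 2 * ab + b2[None, :]


pixels = np.random.rand(8, 8, 3).reshape(-1, 3)
clusters = np.random.rand(16, 3)
ids = np.argmin(squared_euclidean_distance_sketch(pixels, clusters), axis=1)
print(ids.shape)  # (64,): one cluster index per pixel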
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "rag" SCREAMING_SNAKE_CASE__ :List[str] = True def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any: super().__init__( bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" ) _UpperCamelCase : str = question_encoder_config.pop("model_type" ) _UpperCamelCase : Tuple = kwargs.pop("generator" ) _UpperCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : str = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : Optional[int] = reduce_loss _UpperCamelCase : str = label_smoothing _UpperCamelCase : int = exclude_bos_score _UpperCamelCase : List[str] = do_marginalize _UpperCamelCase : Optional[int] = title_sep _UpperCamelCase : Optional[int] = doc_sep _UpperCamelCase : Union[str, Any] = n_docs _UpperCamelCase : Tuple = max_combined_length _UpperCamelCase : Union[str, Any] = dataset _UpperCamelCase : Any = dataset_split _UpperCamelCase : List[str] = index_name _UpperCamelCase : int = retrieval_vector_size _UpperCamelCase : str = retrieval_batch_size _UpperCamelCase : Dict = passages_path _UpperCamelCase : str = index_path _UpperCamelCase : Tuple = use_dummy_dataset _UpperCamelCase : Union[str, Any] = output_retrieved _UpperCamelCase : Optional[Any] = do_deduplication _UpperCamelCase : str = use_cache if self.forced_eos_token_id is None: _UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: _UpperCamelCase : Dict = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = self.question_encoder.to_dict() _UpperCamelCase : Tuple = self.generator.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
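# A hedged sketch of composing this config from its two sub-configs; in the released
# transformers API the classmethod defined above is exposed as
# RagConfig.from_question_encoder_generator_configs, and fetching the sub-configs
# requires network access.
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
print(rag_config.n_docs)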
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' def __init__( self : Dict , __a : VQModel , __a : UNetaDModel , __a : DDIMScheduler ) -> int: super().__init__() self.register_modules(vqvae=__a , unet=__a , scheduler=__a ) @torch.no_grad() def __call__( self : Optional[int] , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : float = 0.0 , __a : int = 50 , __a : Optional[str] = "pil" , __a : bool = True , **__a : int , ) -> Union[Tuple, ImagePipelineOutput]: _UpperCamelCase : Dict = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__a , ) _UpperCamelCase : List[Any] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _UpperCamelCase : Union[str, Any] = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__a ) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature _UpperCamelCase : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCamelCase : Optional[Any] = {} if accepts_eta: _UpperCamelCase : List[str] = eta for t in self.progress_bar(self.scheduler.timesteps ): _UpperCamelCase : str = self.scheduler.scale_model_input(__a , __a ) # predict the noise residual _UpperCamelCase : Union[str, Any] = self.unet(__a , __a ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCamelCase : Optional[int] = self.scheduler.step(__a , __a , __a , **__a ).prev_sample # decode the image latents with the VAE _UpperCamelCase : Dict = self.vqvae.decode(__a ).sample _UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCamelCase : List[Any] = self.numpy_to_pil(__a ) if not return_dict: return (image,) return ImagePipelineOutput(images=__a )
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int: _UpperCamelCase : Tuple = parent _UpperCamelCase : str = batch_size _UpperCamelCase : Tuple = image_size _UpperCamelCase : List[str] = patch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : List[str] = is_training _UpperCamelCase : Any = use_labels _UpperCamelCase : int = hidden_size _UpperCamelCase : List[Any] = num_hidden_layers _UpperCamelCase : Union[str, Any] = num_attention_heads _UpperCamelCase : Optional[int] = intermediate_size _UpperCamelCase : Any = hidden_act _UpperCamelCase : Dict = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = type_sequence_label_size _UpperCamelCase : int = initializer_range _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Any = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2 _UpperCamelCase : Optional[int] = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Union[str, Any] = None if self.use_labels: _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase : Any = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = ViTModel(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]: _UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCamelCase : Union[str, Any] = 1 _UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : Dict = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int: _UpperCamelCase : Any = self.type_sequence_label_size _UpperCamelCase : Optional[Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : List[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Dict = self.prepare_config_and_inputs() ( ( _UpperCamelCase ), ( _UpperCamelCase ), ( _UpperCamelCase ), ) : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ :Any = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ :str = True SCREAMING_SNAKE_CASE__ :List[Any] = False SCREAMING_SNAKE_CASE__ :int = False SCREAMING_SNAKE_CASE__ :int = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : Dict = ViTModelTester(self ) _UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: pass def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _UpperCamelCase : Any = model_class(__a ) _UpperCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : List[str] = [*signature.parameters.keys()] _UpperCamelCase : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : List[str] = ViTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: _UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a ) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[Any] = prepare_img() _UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : Dict = model(**__a ) # verify the logits _UpperCamelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) _UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a ) _UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a ) # verify the logits _UpperCamelCase : int = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , __a ) _UpperCamelCase : int = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: _UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) _UpperCamelCase : int = self.default_image_processor _UpperCamelCase : Dict = prepare_img() _UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCamelCase : int = model(__a )
310
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: _UpperCamelCase : Any = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" ) _UpperCamelCase : Any = { "input_ids": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } _UpperCamelCase : Union[str, Any] = model(__a )["last_hidden_state"] _UpperCamelCase : Optional[Any] = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , __a ) # compare the actual values for a slice. _UpperCamelCase : Optional[Any] = tf.convert_to_tensor( [ [ [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04], [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44], [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
310
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[int] = -1 _UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Any = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Optional[int] = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Tuple = TextIteratorStreamer(__a ) _UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Union[str, Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Tuple = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" ) _UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _UpperCamelCase : int = -1 _UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : int = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _UpperCamelCase : List[str] = "" for new_text in streamer: streamer_text += new_text
310
1
"""simple docstring""" from __future__ import annotations import pandas as pd def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> list[int]: """simple docstring""" _UpperCamelCase : Optional[int] = [0] * no_of_processes _UpperCamelCase : str = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(lowercase_ ): _UpperCamelCase : Optional[int] = burst_time[i] _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = 0 _UpperCamelCase : Dict = 999_999_999 _UpperCamelCase : str = 0 _UpperCamelCase : List[Any] = False # Process until all processes are completed while complete != no_of_processes: for j in range(lowercase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: _UpperCamelCase : int = remaining_time[j] _UpperCamelCase : Union[str, Any] = j _UpperCamelCase : Optional[Any] = True if not check: increment_time += 1 continue remaining_time[short] -= 1 _UpperCamelCase : str = remaining_time[short] if minm == 0: _UpperCamelCase : List[str] = 999_999_999 if remaining_time[short] == 0: complete += 1 _UpperCamelCase : List[Any] = False # Find finish time of current process _UpperCamelCase : Optional[Any] = increment_time + 1 # Calculate waiting time _UpperCamelCase : Any = finish_time - arrival_time[short] _UpperCamelCase : Optional[Any] = finar - burst_time[short] if waiting_time[short] < 0: _UpperCamelCase : Union[str, Any] = 0 # Increment time increment_time += 1 return waiting_time def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> list[int]: """simple docstring""" _UpperCamelCase : Dict = [0] * no_of_processes for i in range(lowercase_ ): _UpperCamelCase : Union[str, Any] = burst_time[i] + waiting_time[i] return turn_around_time def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> None: """simple docstring""" _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : str = 0 for i in range(lowercase_ ): _UpperCamelCase : str = total_waiting_time + waiting_time[i] _UpperCamelCase : int = total_turn_around_time + turn_around_time[i] print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' ) print("Average turn around time =" ,total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("Enter how many process you want to analyze") lowerCamelCase__ = int(input()) lowerCamelCase__ = [0] * no_of_processes lowerCamelCase__ = [0] * no_of_processes lowerCamelCase__ = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("Enter the arrival time and burst time for process:--" + str(i + 1)) lowerCamelCase__ , lowerCamelCase__ = map(int, input().split()) lowerCamelCase__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ = burst_time lowerCamelCase__ = no_of_processes lowerCamelCase__ = waiting_time lowerCamelCase__ = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) lowerCamelCase__ = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ "Process", "BurstTime", "ArrivalTime", "WaitingTime", "TurnAroundTime", ], ) # Printing the dataFrame pd.set_option("display.max_rows", fcfs.shape[0] + 1) print(fcfs)
310
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" with open(lowercase_ ) as metadata_file: _UpperCamelCase : Dict = json.load(lowercase_ ) _UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"] # Load the entity vocab file _UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ ) # add an entry for [MASK2] _UpperCamelCase : Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) _UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f: _UpperCamelCase : Tuple = json.load(lowercase_ ) _UpperCamelCase : Optional[int] = "MLukeTokenizer" with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) _UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ ) # Initialize the embeddings of the special tokens _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 ) _UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCamelCase : Optional[Any] = state_dict[bias_name] _UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.''' _UpperCamelCase : List[Any] = state_dict[prefix + matrix_name] _UpperCamelCase : str = state_dict[prefix + matrix_name] _UpperCamelCase : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCamelCase : Tuple = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCamelCase : int = state_dict["entity_predictions.bias"] _UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCamelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCamelCase : Union[str, Any] = state_dict[key] else: _UpperCamelCase : Dict = state_dict[key] _UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if set(lowercase_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowercase_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" ) _UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." _UpperCamelCase : Optional[Any] = (0, 9) _UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : List[str] = model(**lowercase_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 33, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 1, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = "Tokyo is the capital of <mask>." 
_UpperCamelCase : List[Any] = (24, 30) _UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : Optional[Any] = model(**lowercase_ ) _UpperCamelCase : int = encoding["input_ids"][0].tolist() _UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() _UpperCamelCase : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase_ ) ) model.save_pretrained(lowercase_ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"] _UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )] _UpperCamelCase : List[str] = {} for entry in data: _UpperCamelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCamelCase : Dict = entity_id break _UpperCamelCase : Dict = F'''{language}:{entity_name}''' _UpperCamelCase : str = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
310
1
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MgpstrTokenizer SCREAMING_SNAKE_CASE__ :str = False SCREAMING_SNAKE_CASE__ :Any = {} SCREAMING_SNAKE_CASE__ :Tuple = False def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: super().setUp() # fmt: off _UpperCamelCase : Optional[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on _UpperCamelCase : Any = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__a : Optional[int] ) -> Any: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple ) -> int: _UpperCamelCase : List[str] = "tester" _UpperCamelCase : int = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: pass def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: _UpperCamelCase : int = self.get_tokenizers(do_lower_case=__a ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Dict = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) _UpperCamelCase : int = tokenizer.encode([special_token] , add_special_tokens=__a ) self.assertEqual(len(__a ) , 1 ) _UpperCamelCase : Union[str, Any] = tokenizer.decode(__a , skip_special_tokens=__a ) self.assertTrue(special_token not in decoded ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: _UpperCamelCase : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase, _UpperCamelCase : Dict = self.get_input_output_texts(__a ) _UpperCamelCase : List[str] = tokenizer.tokenize(__a ) _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(__a ) self.assertNotEqual(len(__a ) , 0 ) _UpperCamelCase : Union[str, Any] = tokenizer.decode(__a ) self.assertIsInstance(__a , __a ) self.assertEqual(text_a.replace(" " , "" ) , __a ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def __SCREAMING_SNAKE_CASE ( self : str ) -> str: pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: pass
310
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    '''simple docstring'''

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
                } ) ,
        )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
        }
310
1
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(_UpperCamelCase ) , "Tatoeba directory does not exist." ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return TatoebaConverter(save_dir=__a ) @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: self.resolver.convert_models(["heb-eng"] ) @slow def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: _UpperCamelCase, _UpperCamelCase : int = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__a ) assert mmeta["long_pair"] == "heb-eng"
310
"""simple docstring""" from __future__ import annotations from math import pi def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" from __future__ import annotations def lowercase__ ( lowercase_ ,lowercase_ ) -> bool: """simple docstring""" _UpperCamelCase : Tuple = get_failure_array(lowercase_ ) # 2) Step through text searching for pattern _UpperCamelCase, _UpperCamelCase : Tuple = 0, 0 # index into text, pattern while i < len(lowercase_ ): if pattern[j] == text[i]: if j == (len(lowercase_ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCamelCase : Optional[int] = failure[j - 1] continue i += 1 return False def lowercase__ ( lowercase_ ) -> list[int]: """simple docstring""" _UpperCamelCase : Optional[Any] = [0] _UpperCamelCase : Tuple = 0 _UpperCamelCase : str = 1 while j < len(lowercase_ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCamelCase : Union[str, Any] = failure[i - 1] continue j += 1 failure.append(lowercase_ ) return failure if __name__ == "__main__": # Test 1) lowerCamelCase__ = "abc1abc12" lowerCamelCase__ = "alskfjaldsabc1abc1abc12k23adsfabcabc" lowerCamelCase__ = "alskfjaldsk23adsfabcabc" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) lowerCamelCase__ = "ABABX" lowerCamelCase__ = "ABABZABABYABABX" assert kmp(pattern, text) # Test 3) lowerCamelCase__ = "AAAB" lowerCamelCase__ = "ABAAAAAB" assert kmp(pattern, text) # Test 4) lowerCamelCase__ = "abcdabcy" lowerCamelCase__ = "abcxabcdabxabcdabcdabcy" assert kmp(pattern, text) # Test 5) lowerCamelCase__ = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
310
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if "://" in dataset_path: _UpperCamelCase : List[Any] = dataset_path.split("://" )[1] return dataset_path def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ ) def lowercase__ ( ) -> None: """simple docstring""" if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase : Dict = None _UpperCamelCase : str = None _UpperCamelCase : str = threading.Lock()
310
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"audio": Audio()} ) SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"labels": ClassLabel} ) SCREAMING_SNAKE_CASE__ :str = "audio" SCREAMING_SNAKE_CASE__ :str = "labels" def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int ) -> List[Any]: if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __a ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _UpperCamelCase : List[Any] = copy.deepcopy(self ) _UpperCamelCase : List[str] = self.label_schema.copy() _UpperCamelCase : Tuple = features[self.label_column] _UpperCamelCase : Union[str, Any] = label_schema return task_template @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict[str, str]: return { self.audio_column: "audio", self.label_column: "labels", }
310
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
310
1
"""simple docstring""" import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Dict = (DDPMParallelScheduler,) def __SCREAMING_SNAKE_CASE ( self : str , **__a : Optional[Any] ) -> List[str]: _UpperCamelCase : Tuple = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**__a ) return config def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: for t in [0, 500, 999]: self.check_over_forward(time_step=__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: _UpperCamelCase : str = self.scheduler_classes[0] _UpperCamelCase : Optional[Any] = self.get_scheduler_config() _UpperCamelCase : Dict = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : str = self.scheduler_classes[0] _UpperCamelCase : str = self.get_scheduler_config() _UpperCamelCase : Union[str, Any] = scheduler_class(**__a ) _UpperCamelCase : Any = len(__a ) _UpperCamelCase : List[Any] = self.dummy_model() _UpperCamelCase : List[str] = self.dummy_sample_deter _UpperCamelCase : str = self.dummy_sample_deter + 0.1 _UpperCamelCase : Tuple = self.dummy_sample_deter - 0.1 _UpperCamelCase : Optional[int] = samplea.shape[0] _UpperCamelCase : Union[str, Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) _UpperCamelCase : Tuple = torch.arange(__a )[0:3, None].repeat(1 , __a ) _UpperCamelCase : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _UpperCamelCase : List[Any] = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _UpperCamelCase : Optional[Any] = torch.sum(torch.abs(__a ) ) _UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 11_53.18_33 ) < 1e-2 assert abs(result_mean.item() - 0.50_05 ) < 1e-3 def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: 
_UpperCamelCase : List[str] = self.scheduler_classes[0] _UpperCamelCase : str = self.get_scheduler_config() _UpperCamelCase : int = scheduler_class(**__a ) _UpperCamelCase : List[Any] = len(__a ) _UpperCamelCase : int = self.dummy_model() _UpperCamelCase : List[str] = self.dummy_sample_deter _UpperCamelCase : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual _UpperCamelCase : List[Any] = model(__a , __a ) # 2. predict previous mean of sample x_t-1 _UpperCamelCase : Optional[int] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample _UpperCamelCase : Union[str, Any] = pred_prev_sample _UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) ) _UpperCamelCase : List[str] = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2 assert abs(result_mean.item() - 0.33_72 ) < 1e-3 def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: _UpperCamelCase : Tuple = self.scheduler_classes[0] _UpperCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCamelCase : Optional[int] = scheduler_class(**__a ) _UpperCamelCase : int = len(__a ) _UpperCamelCase : List[str] = self.dummy_model() _UpperCamelCase : Any = self.dummy_sample_deter _UpperCamelCase : str = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual _UpperCamelCase : Union[str, Any] = model(__a , __a ) # 2. predict previous mean of sample x_t-1 _UpperCamelCase : int = scheduler.step(__a , __a , __a , generator=__a ).prev_sample _UpperCamelCase : Tuple = pred_prev_sample _UpperCamelCase : Optional[Any] = torch.sum(torch.abs(__a ) ) _UpperCamelCase : List[str] = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2 assert abs(result_mean.item() - 0.26_31 ) < 1e-3 def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: _UpperCamelCase : Optional[int] = self.scheduler_classes[0] _UpperCamelCase : List[str] = self.get_scheduler_config() _UpperCamelCase : int = scheduler_class(**__a ) _UpperCamelCase : Dict = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__a ) _UpperCamelCase : Any = scheduler.timesteps for i, timestep in enumerate(__a ): if i == len(__a ) - 1: _UpperCamelCase : str = -1 else: _UpperCamelCase : Dict = timesteps[i + 1] _UpperCamelCase : Tuple = scheduler.previous_timestep(__a ) _UpperCamelCase : str = prev_t.item() self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase : Optional[Any] = self.scheduler_classes[0] _UpperCamelCase : Any = self.get_scheduler_config() _UpperCamelCase : Any = scheduler_class(**__a ) _UpperCamelCase : int = [100, 87, 50, 51, 0] with self.assertRaises(__a , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase : int = self.scheduler_classes[0] _UpperCamelCase : Tuple = self.get_scheduler_config() _UpperCamelCase : List[Any] = scheduler_class(**__a ) _UpperCamelCase : Tuple = [100, 87, 50, 1, 0] _UpperCamelCase : Any = len(__a ) with self.assertRaises(__a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." 
): scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : List[str] = self.scheduler_classes[0] _UpperCamelCase : List[str] = self.get_scheduler_config() _UpperCamelCase : Optional[Any] = scheduler_class(**__a ) _UpperCamelCase : str = [scheduler.config.num_train_timesteps] with self.assertRaises( __a , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=__a )
310
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100""" lowerCamelCase__ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowerCamelCase__ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowerCamelCase__ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
310
1
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE__ :Tuple = "AutoImageProcessor" SCREAMING_SNAKE_CASE__ :Optional[Any] = "AutoTokenizer" def __init__( self : Any , __a : List[str] , __a : List[Any] ) -> List[str]: super().__init__(__a , __a ) _UpperCamelCase : Dict = self.image_processor def __call__( self : List[str] , __a : Optional[Any]=None , __a : Any=None , __a : int=None , **__a : int ) -> Dict: if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: _UpperCamelCase : Any = self.tokenizer(__a , return_tensors=__a , **__a ) if images is not None: _UpperCamelCase : List[str] = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _UpperCamelCase : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : Union[str, Any] , **__a : Tuple ) -> List[str]: return self.tokenizer.batch_decode(*__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : Dict , **__a : Dict ) -> List[str]: return self.tokenizer.decode(*__a , **__a ) @property def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: return ["input_ids", "attention_mask", "pixel_values"]
310
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[str] = "roberta" def __init__( self : List[str] , __a : Optional[Any]=5_0265 , __a : Tuple=768 , __a : Optional[int]=12 , __a : int=12 , __a : Tuple=3072 , __a : int="gelu" , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : int=512 , __a : Tuple=2 , __a : Any=0.02 , __a : str=1e-1_2 , __a : Any=1 , __a : List[Any]=0 , __a : str=2 , __a : Union[str, Any]="absolute" , __a : int=True , __a : Optional[int]=None , **__a : str , ) -> Union[str, Any]: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : int = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Dict = num_attention_heads _UpperCamelCase : Optional[Any] = hidden_act _UpperCamelCase : List[str] = intermediate_size _UpperCamelCase : List[Any] = hidden_dropout_prob _UpperCamelCase : Optional[int] = attention_probs_dropout_prob _UpperCamelCase : Optional[Any] = max_position_embeddings _UpperCamelCase : Any = type_vocab_size _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : str = use_cache _UpperCamelCase : Optional[int] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
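For reference, the pipeline these tests exercise can be driven directly; a hedged usage sketch mirroring the slow DETR test above:

from transformers import pipeline

# facebook/detr-resnet-50 is the checkpoint used in the slow tests above
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# each prediction is a dict of the shape asserted above:
# {"score": float, "label": str,
#  "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}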
310
1
"""simple docstring""" import math def lowercase__ ( lowercase_ ) -> int: """simple docstring""" if not isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Any = F'''Input value of [number={number}] must be an integer''' raise TypeError(lowercase_ ) if number < 1: _UpperCamelCase : Optional[Any] = F'''Input value of [number={number}] must be > 0''' raise ValueError(lowercase_ ) elif number == 1: return 3 elif number == 2: return 5 else: _UpperCamelCase : Any = int(math.log(number // 3 ,2 ) ) + 2 _UpperCamelCase : Optional[int] = [3, 5] _UpperCamelCase : Dict = 2 _UpperCamelCase : Dict = 3 for block in range(1 ,lowercase_ ): for _ in range(lowercase_ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): lowerCamelCase__ = 0 try: lowerCamelCase__ = proth(number) except ValueError: print(f"""ValueError: there is no {number}th Proth number""") continue print(f"""The {number}th Proth number: {value}""")
310
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase__ = {"UserAgent": UserAgent().random} def lowercase__ ( lowercase_ ) -> dict: """simple docstring""" _UpperCamelCase : str = script.contents[0] _UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : str ) -> Tuple: _UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/''' _UpperCamelCase : Optional[Any] = self.get_json() def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict: _UpperCamelCase : int = requests.get(self.url , headers=__a ).text _UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ) -> str: return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : str ) -> str: return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: return self.user_data["username"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["full_name"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self.user_data["biography"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["business_email"] @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["external_url"] @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool: return self.user_data["is_verified"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: return self.user_data["is_private"] def lowercase__ ( lowercase_ = "github" ) -> None: """simple docstring""" import os if os.environ.get("CI" ): return # test failing on GitHub Actions _UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data ,lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." 
") assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() instagram_user = InstagramUser("github") print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
310
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , __a : Tuple , __a : str=7 , __a : int=3 , __a : str=18 , __a : Any=30 , __a : Optional[int]=400 , __a : Tuple=True , __a : Optional[Any]=32 , __a : Union[str, Any]=True , ) -> Optional[Any]: _UpperCamelCase : Optional[Any] = parent _UpperCamelCase : Any = batch_size _UpperCamelCase : int = num_channels _UpperCamelCase : List[str] = image_size _UpperCamelCase : int = min_resolution _UpperCamelCase : Union[str, Any] = max_resolution _UpperCamelCase : Tuple = do_resize _UpperCamelCase : List[Any] = size_divisor _UpperCamelCase : Dict = do_rescale def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = GLPNImageProcessor if is_vision_available() else None def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: _UpperCamelCase : Optional[int] = GLPNImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , "do_resize" ) ) self.assertTrue(hasattr(__a , "size_divisor" ) ) self.assertTrue(hasattr(__a , "resample" ) ) self.assertTrue(hasattr(__a , "do_rescale" ) ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self : str ) -> int: # Initialize image_processing _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) _UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: # Initialize image_processing _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) _UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 
0 ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: # Initialize image_processing _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) _UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
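The assertions above all reduce to one invariant: every output spatial dimension is a multiple of `size_divisor`. A hedged sketch of that resize rule (flooring to the nearest lower multiple is an assumption about the GLPN processor, not read from this test file):

def round_down_to_multiple(dim: int, size_divisor: int) -> int:
    # shrink a spatial dimension to the nearest lower multiple of size_divisor
    return (dim // size_divisor) * size_divisor

assert round_down_to_multiple(418, 32) == 416
assert round_down_to_multiple(416, 32) % 32 == 0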
310
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
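A hedged usage sketch for the biquad factory functions above, assuming `IIRFilter.process` consumes one float sample at a time (as in the `audio_filters` package this file mirrors) and that the first obfuscated factory corresponds to the low-pass variant; `make_lowpass` is the hypothetical de-obfuscated name:

from math import sin, tau

lowpass = make_lowpass(frequency=1_000, samplerate=48_000)  # hypothetical name
# run a 440 Hz test tone through the filter, one sample at a time
filtered = [lowpass.process(sin(tau * 440 * n / 48_000)) for n in range(480)]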
310
1
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCamelCase__ = 2 class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any , *, # begin keyword-only arguments __a : Optional[Any]="<s>" , __a : Dict="<pad>" , __a : Optional[int]="</s>" , __a : List[str]="<unk>" , __a : Dict=None , ) -> Tuple: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = bos, unk, pad, eos _UpperCamelCase : List[str] = [] _UpperCamelCase : List[str] = [] _UpperCamelCase : int = {} _UpperCamelCase : Optional[int] = self.add_symbol(__a ) _UpperCamelCase : List[str] = self.add_symbol(__a ) _UpperCamelCase : Tuple = self.add_symbol(__a ) _UpperCamelCase : int = self.add_symbol(__a ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__a ) _UpperCamelCase : Union[str, Any] = len(self.symbols ) def __eq__( self : Optional[int] , __a : Dict ) -> Tuple: return self.indices == other.indices def __getitem__( self : Optional[int] , __a : int ) -> str: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Optional[int] ) -> str: return len(self.symbols ) def __contains__( self : Optional[Any] , __a : List[Any] ) -> List[str]: return sym in self.indices @classmethod def __SCREAMING_SNAKE_CASE ( cls : Tuple , __a : int ) -> List[Any]: _UpperCamelCase : List[str] = cls() d.add_from_file(__a ) return d def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] , __a : Any=1 , __a : str=False ) -> Optional[int]: if word in self.indices and not overwrite: _UpperCamelCase : Optional[int] = self.indices[word] _UpperCamelCase : str = self.count[idx] + n return idx else: _UpperCamelCase : Any = len(self.symbols ) _UpperCamelCase : Optional[Any] = idx self.symbols.append(__a ) self.count.append(__a ) return idx def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, Any] ) -> Optional[Any]: return 0 def __SCREAMING_SNAKE_CASE ( self : str , __a : str ) -> Any: if isinstance(__a , __a ): try: with open(__a , "r" , encoding="utf-8" ) as fd: self.add_from_file(__a ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(__a ) ) return _UpperCamelCase : int = f.readlines() _UpperCamelCase : Dict = self._load_meta(__a ) for line in lines[indices_start_line:]: try: _UpperCamelCase, _UpperCamelCase : List[str] = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": _UpperCamelCase : str = True _UpperCamelCase, _UpperCamelCase : Optional[int] = line.rsplit(" " , 1 ) else: _UpperCamelCase : Optional[int] = False _UpperCamelCase : Optional[int] = int(__a ) _UpperCamelCase : Tuple = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(__a ) ) self.add_symbol(__a , n=__a , overwrite=__a ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : str = dict((re.sub(r"@@$" ,"" ,lowercase_ ), v) if k.endswith("@@" ) else (re.sub(r"$" ,"</w>" ,lowercase_ ), v) for k, v in d.items() ) _UpperCamelCase : Dict = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] _UpperCamelCase : Any = d[k] # restore return da def lowercase__ ( lowercase_ ,lowercase_ ) -> Any: """simple docstring""" if not os.path.exists(lowercase_ ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models _UpperCamelCase : str = os.path.join(lowercase_ ,"checkpoint.pt" ) if not os.path.isfile(lowercase_ ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) _UpperCamelCase : int = torch.load(lowercase_ ,map_location="cpu" ) _UpperCamelCase : Optional[int] = chkpt["cfg"]["model"] # dicts _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"dict.txt" ) if not os.path.isfile(lowercase_ ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) _UpperCamelCase : int = Dictionary.load(lowercase_ ) _UpperCamelCase : Optional[Any] = rewrite_dict_keys(src_dict.indices ) _UpperCamelCase : Tuple = len(lowercase_ ) _UpperCamelCase : Optional[Any] = os.path.join(lowercase_ ,VOCAB_FILES_NAMES["vocab_file"] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ ,ensure_ascii=lowercase_ ,indent=lowercase_ ) ) # merges_file (bpecodes) _UpperCamelCase : Optional[Any] = os.path.join(lowercase_ ,"bpecodes" ) if not os.path.isfile(lowercase_ ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(lowercase_ ,lowercase_ ) # model config _UpperCamelCase : Any = os.path.join(lowercase_ ,"config.json" ) _UpperCamelCase : List[str] = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1e-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ ,ensure_ascii=lowercase_ ,indent=lowercase_ ) ) # tokenizer config _UpperCamelCase : Optional[int] = os.path.join(lowercase_ ,lowercase_ ) _UpperCamelCase : str = { "bos_token": "<s>", "eos_token": "</s>", 
"model_max_length": 1_024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(lowercase_ ,ensure_ascii=lowercase_ ,indent=lowercase_ ) ) # model _UpperCamelCase : Tuple = chkpt["model"] # remove unneeded keys _UpperCamelCase : Any = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(lowercase_ ,lowercase_ ) _UpperCamelCase : List[Any] = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): _UpperCamelCase : int = model_state_dict.pop(lowercase_ ) else: _UpperCamelCase : Optional[int] = model_state_dict.pop(lowercase_ ) _UpperCamelCase : Tuple = BioGptConfig.from_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = BioGptForCausalLM(lowercase_ ) # check that it loads ok model_new.load_state_dict(lowercase_ ) # save _UpperCamelCase : List[str] = os.path.join(lowercase_ ,lowercase_ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(lowercase_ ,lowercase_ ) print("Conversion is done!" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCamelCase__ = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
310
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
310
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch lowerCamelCase__ = logging.get_logger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __a : str=False , __a : List[str]=False , __a : str=6.0 , __a : Optional[Any]=None , __a : List[str]=False , __a : Optional[Any]=False , __a : Tuple=None , __a : List[str]="fp4" , __a : List[str]=False , **__a : Dict , ) -> Optional[Any]: _UpperCamelCase : int = load_in_abit _UpperCamelCase : Tuple = load_in_abit _UpperCamelCase : Tuple = llm_inta_threshold _UpperCamelCase : Tuple = llm_inta_skip_modules _UpperCamelCase : Union[str, Any] = llm_inta_enable_fpaa_cpu_offload _UpperCamelCase : Dict = llm_inta_has_fpaa_weight _UpperCamelCase : Tuple = bnb_abit_quant_type _UpperCamelCase : Tuple = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: _UpperCamelCase : Optional[Any] = torch.floataa elif isinstance(__a , __a ): _UpperCamelCase : List[str] = getattr(__a , __a ) elif isinstance(__a , torch.dtype ): _UpperCamelCase : Optional[int] = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" ) self.post_init() def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not isinstance(self.llm_inta_threshold , __a ): raise ValueError("llm_int8_threshold must be a float" ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __a ): raise ValueError("llm_int8_skip_modules must be a list of strings" ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __a ): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" ) if not isinstance(self.llm_inta_has_fpaa_weight , __a ): raise ValueError("llm_int8_has_fp16_weight must be a boolean" ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" ) if not isinstance(self.bnb_abit_quant_type , __a ): raise ValueError("bnb_4bit_quant_type must be a string" ) if not isinstance(self.bnb_abit_use_double_quant , __a ): raise ValueError("bnb_4bit_use_double_quant must be a boolean" ) if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: return self.load_in_abit or self.load_in_abit def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type 
== "nf4": return "nf4" else: return None @classmethod def __SCREAMING_SNAKE_CASE ( cls : int , __a : str , __a : List[Any] , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Union[str, Any] = cls(**__a ) _UpperCamelCase : Optional[Any] = [] for key, value in kwargs.items(): if hasattr(__a , __a ): setattr(__a , __a , __a ) to_remove.append(__a ) for key in to_remove: kwargs.pop(__a , __a ) if return_unused_kwargs: return config, kwargs else: return config def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, os.PathLike] ) -> int: with open(__a , "w" , encoding="utf-8" ) as writer: _UpperCamelCase : int = self.to_dict() _UpperCamelCase : Optional[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + "\n" writer.write(__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict[str, Any]: _UpperCamelCase : int = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = str(output["bnb_4bit_compute_dtype"] ).split("." )[1] return output def __repr__( self : Tuple ) -> Optional[int]: return F'''{self.__class__.__name__} {self.to_json_string()}''' def __SCREAMING_SNAKE_CASE ( self : Dict , __a : bool = True ) -> str: if use_diff is True: _UpperCamelCase : Any = self.to_diff_dict() else: _UpperCamelCase : Tuple = self.to_dict() return json.dumps(__a , indent=2 , sort_keys=__a ) + "\n" def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict[str, Any]: _UpperCamelCase : Union[str, Any] = self.to_dict() # get the default config dict _UpperCamelCase : str = BitsAndBytesConfig().to_dict() _UpperCamelCase : List[Any] = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: _UpperCamelCase : List[str] = value return serializable_config_dict
310
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
310
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True} SCREAMING_SNAKE_CASE__ :Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : str = {"unk_token": "<unk>"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase : Optional[Any] = "lower newer" _UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : str = tokens + [tokenizer.unk_token] _UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = "lower newer" # Testing tokenization _UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens _UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with 
special tokens _UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token _UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token] _UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input _UpperCamelCase : Optional[int] = "This is a simple input" _UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Dict = ("This is a simple input", "This is a pair") _UpperCamelCase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: _UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _UpperCamelCase : Union[str, Any] = "This is a simple input" _UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"] _UpperCamelCase : str = ("This is a simple input", "This is a pair") _UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id _UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" ) _UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) _UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" ) _UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: _UpperCamelCase : Any = "$$$" _UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) _UpperCamelCase : int = "This is a simple input" _UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id _UpperCamelCase : str = tokenizer(__a ) _UpperCamelCase : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids ) _UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Tuple = "Encode this." _UpperCamelCase : List[str] = "This one too please." 
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a ) encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a ) _UpperCamelCase : int = tokenizer.encode_plus( __a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , ) _UpperCamelCase : str = encoded_sequence_dict["input_ids"] _UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__a ) , len(__a ) ) _UpperCamelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__a ) ] _UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__a , __a ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Any = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) _UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" ) _UpperCamelCase : Optional[Any] = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Union[str, Any] = tokenizer.encode( __a , ) # Same as above self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[str] = "bos" _UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"] _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : List[Any] = tokenizer.encode( __a , ) # We changed the bos token self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _UpperCamelCase : Tuple = tokenizer.encode( __a , ) self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
310
1
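The row above exercises GPT-2's byte-level BPE tokenizer against a toy vocabulary written to disk. A minimal sketch of the same setup, assuming only that `transformers` is installed; the tiny vocab/merges and the temp-dir layout are illustrative, mirroring the row rather than quoting it:

import json
import os
import tempfile

from transformers import GPT2Tokenizer

# Toy byte-level BPE vocabulary; "\u0120" is the byte-level marker for a leading space.
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
         "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
         "\u0120wider", "<unk>", "<|endoftext|>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r"]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    json.dump({tok: i for i, tok in enumerate(vocab)}, f)
with open(merges_file, "w", encoding="utf-8") as f:
    f.write("\n".join(merges))

tok = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
# "lower" is built by the merges; "newer" is not, so it falls back to single symbols.
print(tok.tokenize("lower newer", add_prefix_space=True))
# ['Ġlow', 'er', 'Ġ', 'n', 'e', 'w', 'er']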
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "b0": efficientnet.EfficientNetBa, "b1": efficientnet.EfficientNetBa, "b2": efficientnet.EfficientNetBa, "b3": efficientnet.EfficientNetBa, "b4": efficientnet.EfficientNetBa, "b5": efficientnet.EfficientNetBa, "b6": efficientnet.EfficientNetBa, "b7": efficientnet.EfficientNetBa, } lowerCamelCase__ = { "b0": { "hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": [], }, "b1": { "hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16], }, "b2": { "hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16], }, "b3": { "hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18], }, "b4": { "hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6], }, "b5": { "hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27], }, "b6": { "hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31], }, "b7": { "hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18], }, } def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[str] = EfficientNetConfig() _UpperCamelCase : Optional[int] = CONFIG_MAP[model_name]["hidden_dim"] _UpperCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] _UpperCamelCase : Optional[Any] = CONFIG_MAP[model_name]["depth_coef"] _UpperCamelCase : Optional[int] = CONFIG_MAP[model_name]["image_size"] _UpperCamelCase : Tuple = CONFIG_MAP[model_name]["dropout_rate"] _UpperCamelCase : Optional[Any] = CONFIG_MAP[model_name]["dw_padding"] _UpperCamelCase : Optional[Any] = "huggingface/label-files" _UpperCamelCase : Dict = "imagenet-1k-id2label.json" _UpperCamelCase : Union[str, Any] = 1_000 _UpperCamelCase : List[str] = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) ) _UpperCamelCase : Tuple = {int(lowercase_ ): v for k, v in idalabel.items()} _UpperCamelCase : List[Any] = idalabel _UpperCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCamelCase : Optional[Any] = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ) return im def lowercase__ ( lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = CONFIG_MAP[model_name]["image_size"] _UpperCamelCase : int = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] ,do_center_crop=lowercase_ ,) return preprocessor def lowercase__ ( lowercase_ ) -> int: 
"""simple docstring""" _UpperCamelCase : List[str] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] _UpperCamelCase : Dict = sorted(set(lowercase_ ) ) _UpperCamelCase : Dict = len(lowercase_ ) _UpperCamelCase : Optional[int] = {b: str(lowercase_ ) for b, i in zip(lowercase_ ,range(lowercase_ ) )} _UpperCamelCase : Any = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: _UpperCamelCase : Dict = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) _UpperCamelCase : Optional[int] = {} for item in rename_keys: if 
item[0] in original_param_names: _UpperCamelCase : Optional[int] = "efficientnet." + item[1] _UpperCamelCase : Optional[int] = "classifier.weight" _UpperCamelCase : Optional[Any] = "classifier.bias" return key_mapping def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCamelCase : int = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCamelCase : Union[str, Any] = torch.from_numpy(lowercase_ ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCamelCase : Optional[Any] = torch.from_numpy(lowercase_ ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCamelCase : Dict = torch.from_numpy(np.transpose(lowercase_ ) ) else: _UpperCamelCase : str = torch.from_numpy(lowercase_ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase_ ) @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : List[Any] = model_classes[model_name]( include_top=lowercase_ ,weights="imagenet" ,input_tensor=lowercase_ ,input_shape=lowercase_ ,pooling=lowercase_ ,classes=1_000 ,classifier_activation="softmax" ,) _UpperCamelCase : Tuple = original_model.trainable_variables _UpperCamelCase : List[str] = original_model.non_trainable_variables _UpperCamelCase : List[Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCamelCase : Optional[int] = param.numpy() _UpperCamelCase : Any = list(tf_params.keys() ) # Load HuggingFace model _UpperCamelCase : List[Any] = get_efficientnet_config(lowercase_ ) _UpperCamelCase : Tuple = EfficientNetForImageClassification(lowercase_ ).eval() _UpperCamelCase : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) _UpperCamelCase : Optional[int] = rename_keys(lowercase_ ) replace_params(lowercase_ ,lowercase_ ,lowercase_ ) # Initialize preprocessor and preprocess input image _UpperCamelCase : Union[str, Any] = convert_image_processor(lowercase_ ) _UpperCamelCase : Optional[int] = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCamelCase : str = hf_model(**lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.logits.detach().numpy() # Original model inference _UpperCamelCase : Dict = False _UpperCamelCase : str = CONFIG_MAP[model_name]["image_size"] _UpperCamelCase : Tuple = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCamelCase : str = image.img_to_array(lowercase_ ) _UpperCamelCase : Optional[Any] = np.expand_dims(lowercase_ ,axis=0 ) _UpperCamelCase : Dict = original_model.predict(lowercase_ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase_ ,lowercase_ ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase_ ): os.mkdir(lowercase_ ) # Save converted model and image processor hf_model.save_pretrained(lowercase_ ) preprocessor.save_pretrained(lowercase_ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) _UpperCamelCase : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase_ ) hf_model.push_to_hub(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="b0", type=str, help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", ) parser.add_argument( "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") lowerCamelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
310
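The converter above is mostly key renaming; the one numerical subtlety is reordering kernel axes between TensorFlow's channels-last layout and PyTorch's channels-first one. A standalone sketch of the two permutations the script applies (the shapes are illustrative):

import numpy as np
import torch

# Regular conv: TF stores kernels as (H, W, in, out); PyTorch expects (out, in, H, W).
tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_weight.shape == (32, 16, 3, 3)

# Depthwise conv: TF stores (H, W, in, multiplier); PyTorch expects (in, multiplier, H, W).
tf_dw = np.random.rand(3, 3, 16, 1).astype(np.float32)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
assert pt_dw.shape == (16, 1, 3, 3)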
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = load_tool("text-question-answering" ) self.tool.setup() _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" )
310
1
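The "text-question-answering" tool tested above wraps extractive question answering. A quick sketch of the same behavior with a plain pipeline; the checkpoint name is my assumption, not necessarily what the tool loads:

from transformers import pipeline

context = (
    "On April 28, 2021, the company launched the BigScience Research Workshop "
    "in collaboration with several other research groups."
)
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
result = qa(question="What did Hugging Face do in April 2021?", context=context)
print(result["answer"])  # expected along the lines of "launched the BigScience Research Workshop"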
"""simple docstring""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"] lowerCamelCase__ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = " Hello world! cécé herlolip" lowerCamelCase__ = [ ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"), ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"), ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"), ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"), ] def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : Any = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(lowercase_ ,lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : Union[str, Any] = dct.pop(lowercase_ ) _UpperCamelCase : int = val def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = torch.load(lowercase_ ,map_location="cpu" ) _UpperCamelCase : Dict = torch.hub.load("pytorch/fairseq" ,"bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase, _UpperCamelCase : Optional[Any] = emb.weight.shape _UpperCamelCase : str = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ ) _UpperCamelCase : int = emb.weight.data return lin_layer @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ) -> List[Any]: """simple docstring""" if not os.path.exists(lowercase_ ): _UpperCamelCase : Tuple = torch.hub.load("pytorch/fairseq" ,lowercase_ ).eval() else: _UpperCamelCase : Dict = load_xsum_checkpoint(lowercase_ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: _UpperCamelCase : Tuple = checkpoint_path.replace("." 
,"-" ) _UpperCamelCase : Dict = BartConfig.from_pretrained(lowercase_ ) _UpperCamelCase : List[str] = bart.encode(lowercase_ ).unsqueeze(0 ) _UpperCamelCase : List[Any] = BartTokenizer.from_pretrained(lowercase_ ).encode(lowercase_ ,return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(lowercase_ ,lowercase_ ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": _UpperCamelCase : List[str] = bart.state_dict() remove_ignore_keys_(lowercase_ ) _UpperCamelCase : Any = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(lowercase_ ,lowercase_ ,lowercase_ ) _UpperCamelCase : Optional[int] = BartForSequenceClassification(lowercase_ ).eval() model.load_state_dict(lowercase_ ) _UpperCamelCase : List[Any] = bart.predict("mnli" ,lowercase_ ,return_logits=lowercase_ ) _UpperCamelCase : Dict = model(lowercase_ )[0] # logits else: # no classification heads to worry about _UpperCamelCase : Optional[Any] = bart.model.state_dict() remove_ignore_keys_(lowercase_ ) _UpperCamelCase : Any = state_dict["decoder.embed_tokens.weight"] _UpperCamelCase : Optional[Any] = bart.extract_features(lowercase_ ) if hf_checkpoint_name == "facebook/bart-large": _UpperCamelCase : Dict = BartModel(lowercase_ ).eval() model.load_state_dict(lowercase_ ) _UpperCamelCase : Dict = model(lowercase_ ).model[0] else: _UpperCamelCase : Union[str, Any] = BartForConditionalGeneration(lowercase_ ).eval() # an existing summarization ckpt model.model.load_state_dict(lowercase_ ) if hasattr(lowercase_ ,"lm_head" ): _UpperCamelCase : Tuple = make_linear_from_emb(model.model.shared ) _UpperCamelCase : str = model.model(lowercase_ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
310
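One idiom worth pulling out of the BART converter above is `make_linear_from_emb`: the LM head is a bias-free linear layer whose weight is the embedding matrix, so logits are hidden states projected back onto the vocabulary. A self-contained sketch (the sizes are illustrative):

import torch
from torch import nn

emb = nn.Embedding(50265, 1024)                      # vocab_size x d_model
vocab_size, d_model = emb.weight.shape
lm_head = nn.Linear(d_model, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data                # tie the head to the embedding

hidden = torch.randn(2, 7, d_model)                  # (batch, seq, d_model)
logits = lm_head(hidden)
assert logits.shape == (2, 7, vocab_size)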
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
310
1
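The row above is Edmonds-Karp max-flow used to read off a cut: BFS finds augmenting paths, residual capacities are updated, and every edge that ends up saturated (zero residual, positive original capacity) is reported, a simplification that works on its 6-node test graph. A readable rendering of the same algorithm; the variable names are mine:

from collections import deque

def bfs(graph, source, sink, parent):
    # BFS over positive residual capacities; records the augmenting path in `parent`.
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for v, cap in enumerate(graph[u]):
            if not visited[v] and cap > 0:
                visited[v] = True
                parent[v] = u
                queue.append(v)
    return visited[sink]

def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    original = [row[:] for row in graph]      # keep pre-flow capacities
    while bfs(graph, source, sink, parent):
        flow = float("inf")
        v = sink
        while v != source:                    # bottleneck capacity along the path
            flow = min(flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:                    # push flow / update the residual graph
            u = parent[v]
            graph[u][v] -= flow
            graph[v][u] += flow
            v = u
    # Edges with zero residual that had capacity originally.
    return [(i, j) for i in range(len(graph)) for j in range(len(graph[0]))
            if graph[i][j] == 0 and original[i][j] > 0]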
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
310
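The function above is the Möbius function μ(n): 0 when n has a squared prime factor, otherwise (-1)^k for k distinct prime factors. The row leans on local `maths` helpers; here is a self-contained sketch with both checks inlined:

def mobius(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:       # squared prime factor -> mu(n) = 0
                return 0
            result = -result     # one more distinct prime factor
        p += 1
    if n > 1:                    # leftover prime factor above sqrt(original n)
        result = -result
    return result

assert [mobius(k) for k in (1, 2, 4, 6, 30)] == [1, -1, 0, 1, -1]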
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
310
1
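The processor above runs resize, center-crop, rescale, and normalize over every frame of every clip, with an optional offset whose intent is to shift rescaled pixels from [0, 1] to [-1, 1]. A minimal numpy sketch of the rescale-with-offset step; the concrete range mapping below is my reading of that intent, not the processor's exact arithmetic:

import numpy as np

def rescale_frame(frame: np.ndarray, offset: bool = True) -> np.ndarray:
    # uint8 pixels -> [0, 1]; with offset, shift and stretch to [-1, 1].
    frame = frame.astype(np.float32) / 255.0
    return frame * 2.0 - 1.0 if offset else frame

video = np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8)  # one 8-frame clip
clip = np.stack([rescale_frame(f) for f in video])
assert -1.0 <= clip.min() and clip.max() <= 1.0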
"""simple docstring""" import numpy as np import datasets lowerCamelCase__ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n" lowerCamelCase__ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n" lowerCamelCase__ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __SCREAMING_SNAKE_CASE ( self : str , __a : str , __a : Dict ) -> Union[str, Any]: # convert to numpy arrays _UpperCamelCase : int = np.array(__a ) _UpperCamelCase : List[str] = np.array(__a ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction _UpperCamelCase : Optional[int] = X - np.mean(__a ) _UpperCamelCase : Dict = np.cov(reference_distribution.T ) try: _UpperCamelCase : Optional[Any] = np.linalg.inv(__a ) except np.linalg.LinAlgError: _UpperCamelCase : Any = np.linalg.pinv(__a ) _UpperCamelCase : str = np.dot(__a , __a ) _UpperCamelCase : int = np.dot(__a , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
310
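Stripped of the metric boilerplate, the computation above is the quadratic form d_i = (x_i - mu)^T Sigma^{-1} (x_i - mu) over the reference distribution's mean mu and covariance Sigma, with a pseudo-inverse fallback when Sigma is singular. A compact sketch that reproduces the docstring example:

import numpy as np

def mahalanobis(X, reference):
    X = np.asarray(X, dtype=float)
    reference = np.asarray(reference, dtype=float)
    delta = X - reference.mean(axis=0)
    cov = np.cov(reference.T)
    inv_cov = np.linalg.pinv(cov)      # pinv also covers singular covariances
    # Row-wise quadratic form: the diagonal of delta @ inv_cov @ delta.T.
    return np.einsum("ij,jk,ik->i", delta, inv_cov, delta)

print(mahalanobis(X=[[0, 1]], reference=[[0, 1], [1, 0]]))  # [0.5]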
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." 
) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) 
-> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." 
,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
310
1
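The caching machinery above keys every download on a hash of its URL plus the server's ETag, so a changed remote file gets a fresh cache entry while unchanged ones are reused. A small sketch of that naming scheme; the example URL and ETag are made up:

from hashlib import sha256
from typing import Optional

def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    # Deterministic cache key: sha256 of the URL, extended by the ETag when known.
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    return name

print(url_to_filename("https://example.com/model.bin", etag='"abc123"'))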
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowerCamelCase__ = datasets.utils.logging.get_logger(__name__) lowerCamelCase__ = ["names", "prefix"] lowerCamelCase__ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] lowerCamelCase__ = ["encoding_errors", "on_bad_lines"] lowerCamelCase__ = ["date_format"] @dataclass class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = "," SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :Optional[Union[int, List[int], str]] = "infer" SCREAMING_SNAKE_CASE__ :Optional[List[str]] = None SCREAMING_SNAKE_CASE__ :Optional[List[str]] = None SCREAMING_SNAKE_CASE__ :Optional[Union[int, str, List[int], List[str]]] = None SCREAMING_SNAKE_CASE__ :Optional[Union[List[int], List[str]]] = None SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :Optional[Literal["c", "python", "pyarrow"]] = None SCREAMING_SNAKE_CASE__ :Dict[Union[int, str], Callable[[Any], Any]] = None SCREAMING_SNAKE_CASE__ :Optional[list] = None SCREAMING_SNAKE_CASE__ :Optional[list] = None SCREAMING_SNAKE_CASE__ :bool = False SCREAMING_SNAKE_CASE__ :Optional[Union[int, List[int]]] = None SCREAMING_SNAKE_CASE__ :Optional[int] = None SCREAMING_SNAKE_CASE__ :Optional[Union[str, List[str]]] = None SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :bool = False SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :str = "." 
SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :str = '"' SCREAMING_SNAKE_CASE__ :int = 0 SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :int = 0 SCREAMING_SNAKE_CASE__ :bool = True SCREAMING_SNAKE_CASE__ :bool = False SCREAMING_SNAKE_CASE__ :Optional[str] = None SCREAMING_SNAKE_CASE__ :int = 10_000 SCREAMING_SNAKE_CASE__ :Optional[datasets.Features] = None SCREAMING_SNAKE_CASE__ :Optional[str] = "strict" SCREAMING_SNAKE_CASE__ :Literal["error", "warn", "skip"] = "error" SCREAMING_SNAKE_CASE__ :Optional[str] = None def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: if self.delimiter is not None: _UpperCamelCase : str = self.delimiter if self.column_names is not None: _UpperCamelCase : Tuple = self.column_names @property def __SCREAMING_SNAKE_CASE ( self : int ) -> int: _UpperCamelCase : List[Any] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __a ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = CsvConfig def __SCREAMING_SNAKE_CASE ( self : str ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[Any] ) -> int: if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got 
data_files={self.config.data_files}''' ) _UpperCamelCase : Optional[int] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__a , (str, list, tuple) ): _UpperCamelCase : Optional[Any] = data_files if isinstance(__a , __a ): _UpperCamelCase : Any = [files] _UpperCamelCase : Tuple = [dl_manager.iter_files(__a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCamelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(__a , __a ): _UpperCamelCase : Any = [files] _UpperCamelCase : Union[str, Any] = [dl_manager.iter_files(__a ) for file in files] splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"files": files} ) ) return splits def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : pa.Table ) -> pa.Table: if self.config.features is not None: _UpperCamelCase : Dict = self.config.features.arrow_schema if all(not require_storage_cast(__a ) for feature in self.config.features.values() ): # cheaper cast _UpperCamelCase : str = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__a ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCamelCase : str = table_cast(__a , __a ) return pa_table def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[int] ) -> List[str]: _UpperCamelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCamelCase : Any = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__a ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ): _UpperCamelCase : Union[str, Any] = pd.read_csv(__a , iterator=__a , dtype=__a , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__a ): _UpperCamelCase : Dict = pa.Table.from_pandas(__a ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__a ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(__a )}: {e}''' ) raise
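# --- Added usage sketch (assumption: the builder above is the packaged CSV loader
# that backs `load_dataset("csv", ...)` in `datasets`; the file path is a placeholder) ---
if __name__ == "__main__":
    from datasets import load_dataset

    # Config kwargs such as `delimiter` are forwarded to the CsvConfig fields above
    # and ultimately to `pd.read_csv`.
    ds = load_dataset("csv", data_files={"train": "my_data.csv"}, delimiter=",")
    print(ds["train"][0])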
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
"""simple docstring""" import math import tensorflow as tf from packaging import version def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = tf.convert_to_tensor(lowercase_ ) _UpperCamelCase : List[str] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) )) return x * cdf def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Tuple = tf.convert_to_tensor(lowercase_ ) _UpperCamelCase : Optional[Any] = tf.cast(math.pi ,x.dtype ) _UpperCamelCase : Union[str, Any] = tf.cast(0.04_4715 ,x.dtype ) _UpperCamelCase : Optional[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowercase_ ,3 )) )) return x * cdf def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : Dict = tf.convert_to_tensor(lowercase_ ) return x * tf.tanh(tf.math.softplus(lowercase_ ) ) def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(lowercase_ ) _UpperCamelCase : int = tf.cast(0.04_4715 ,x.dtype ) _UpperCamelCase : Optional[Any] = tf.cast(0.79_7884_5608 ,x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(lowercase_ ) _UpperCamelCase : List[Any] = tf.cast(1.702 ,x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase__ ( lowercase_ ) -> List[str]: """simple docstring""" return tf.clip_by_value(_gelu(lowercase_ ) ,-10 ,10 ) def lowercase__ ( lowercase_ ,lowercase_=-1 ) -> List[str]: """simple docstring""" _UpperCamelCase, _UpperCamelCase : Optional[int] = tf.split(lowercase_ ,2 ,axis=lowercase_ ) return a * tf.math.sigmoid(lowercase_ ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" return tf.keras.activations.gelu(lowercase_ ,approximate=lowercase_ ) lowerCamelCase__ = tf.keras.activations.gelu lowerCamelCase__ = approximate_gelu_wrap else: lowerCamelCase__ = _gelu lowerCamelCase__ = _gelu_new lowerCamelCase__ = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowerCamelCase__ = object() # For specifying empty leaf dict `{}` lowerCamelCase__ = object() def lowercase__ ( lowercase_ ,lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(lowercase_ ) - len(lowercase_ ) + 1 ): _UpperCamelCase : List[str] = [x.match(lowercase_ ) for x, y in zip(lowercase_ ,ks[i:] )] if matches and all(lowercase_ ): return True return False def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" def replace(lowercase_ ,lowercase_ ): for rule, replacement in rules: if _match(lowercase_ ,lowercase_ ): return replacement return val return replace def lowercase__ ( ) -> Optional[Any]: """simple docstring""" return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" ,lowercase_ )), (("transformer", "wte", "embedding"), P("mp" ,lowercase_ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowercase_ ,"mp" )), (("attention", "out_proj", "kernel"), P("mp" ,lowercase_ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(lowercase_ ,"mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" ,lowercase_ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowercase__ ( lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = _get_partition_rules() _UpperCamelCase : str = _replacement_rules(lowercase_ ) _UpperCamelCase : Tuple = {k: _unmatched for k in flatten_dict(lowercase_ )} _UpperCamelCase : Dict = {k: replace(lowercase_ ,lowercase_ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(lowercase_ ) )
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = "rag" SCREAMING_SNAKE_CASE__ :List[str] = True def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any: super().__init__( bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" ) _UpperCamelCase : str = question_encoder_config.pop("model_type" ) _UpperCamelCase : Tuple = kwargs.pop("generator" ) _UpperCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : str = AutoConfig.for_model(__a , **__a ) _UpperCamelCase : Optional[int] = reduce_loss _UpperCamelCase : str = label_smoothing _UpperCamelCase : int = exclude_bos_score _UpperCamelCase : List[str] = do_marginalize _UpperCamelCase : Optional[int] = title_sep _UpperCamelCase : Optional[int] = doc_sep _UpperCamelCase : Union[str, Any] = n_docs _UpperCamelCase : Tuple = max_combined_length _UpperCamelCase : Union[str, Any] = dataset _UpperCamelCase : Any = dataset_split _UpperCamelCase : List[str] = index_name _UpperCamelCase : int = retrieval_vector_size _UpperCamelCase : str = retrieval_batch_size _UpperCamelCase : Dict = passages_path _UpperCamelCase : str = index_path _UpperCamelCase : Tuple = use_dummy_dataset _UpperCamelCase : Union[str, Any] = output_retrieved _UpperCamelCase : Optional[Any] = do_deduplication _UpperCamelCase : str = use_cache if self.forced_eos_token_id is None: _UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: _UpperCamelCase : Dict = copy.deepcopy(self.__dict__ ) _UpperCamelCase : List[Any] = self.question_encoder.to_dict() _UpperCamelCase : Tuple = self.generator.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
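# --- Added usage sketch (assumption: the class above is transformers' RagConfig and
# the classmethod at the end is named from_question_encoder_generator_configs
# upstream; checkpoint names are illustrative) ---
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder, generator, n_docs=5
    )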
"""simple docstring""" from __future__ import annotations import numpy as np def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" return np.maximum(0 ,lowercase_ ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int: _UpperCamelCase : Tuple = parent _UpperCamelCase : str = batch_size _UpperCamelCase : Tuple = image_size _UpperCamelCase : List[str] = patch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : List[str] = is_training _UpperCamelCase : Any = use_labels _UpperCamelCase : int = hidden_size _UpperCamelCase : List[Any] = num_hidden_layers _UpperCamelCase : Union[str, Any] = num_attention_heads _UpperCamelCase : Optional[int] = intermediate_size _UpperCamelCase : Any = hidden_act _UpperCamelCase : Dict = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = type_sequence_label_size _UpperCamelCase : int = initializer_range _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Any = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2 _UpperCamelCase : Optional[int] = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Union[str, Any] = None if self.use_labels: _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase : Any = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = ViTModel(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]: _UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCamelCase : Union[str, Any] = 1 _UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : Dict = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int: _UpperCamelCase : Any = self.type_sequence_label_size _UpperCamelCase : Optional[Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a ) model.to(__a ) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase : List[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Dict = self.prepare_config_and_inputs() ( ( _UpperCamelCase ), ( _UpperCamelCase ), ( _UpperCamelCase ), ) : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ :Any = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ :str = True SCREAMING_SNAKE_CASE__ :List[Any] = False SCREAMING_SNAKE_CASE__ :int = False SCREAMING_SNAKE_CASE__ :int = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : Dict = ViTModelTester(self ) _UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: pass def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _UpperCamelCase : Any = model_class(__a ) _UpperCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : List[str] = [*signature.parameters.keys()] _UpperCamelCase : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : List[str] = ViTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: _UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a ) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[Any] = prepare_img() _UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : Dict = model(**__a ) # verify the logits _UpperCamelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) _UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a ) _UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass with torch.no_grad(): _UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a ) # verify the logits _UpperCamelCase : int = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , __a ) _UpperCamelCase : int = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: _UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) _UpperCamelCase : int = self.default_image_processor _UpperCamelCase : Dict = prepare_img() _UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" ) _UpperCamelCase : Any = inputs.pixel_values.to(__a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCamelCase : int = model(__a )
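# --- Added standalone sketch of the slow/integration inference path exercised by the
# tests above (requires network access to download the checkpoint) ---
if __name__ == "__main__":
    from PIL import Image
    import torch
    from transformers import ViTForImageClassification, ViTImageProcessor

    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    print(model.config.id2label[int(logits.argmax(-1))])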
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[int] = -1 _UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Any = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Optional[int] = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Tuple = TextIteratorStreamer(__a ) _UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Union[str, Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Tuple = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" ) _UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _UpperCamelCase : int = -1 _UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : int = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _UpperCamelCase : List[str] = "" for new_text in streamer: streamer_text += new_text
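# --- Added sketch of the streaming pattern these tests verify: generation runs in a
# background thread while the main thread consumes decoded text as it arrives ---
if __name__ == "__main__":
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    for text in streamer:
        print(text, end="", flush=True)
    thread.join()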
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[int] = -1 _UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Any = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Optional[int] = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Tuple = TextIteratorStreamer(__a ) _UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Union[str, Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Tuple = cs.out[:-1] self.assertEqual(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" ) _UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _UpperCamelCase : int = -1 _UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : int = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _UpperCamelCase : List[str] = "" for new_text in streamer: streamer_text += new_text
310
1
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments lowerCamelCase__ = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) SCREAMING_SNAKE_CASE__ :bool = field(default=_UpperCamelCase , metadata={"help": "Whether to SortishSamler or not."} ) SCREAMING_SNAKE_CASE__ :bool = field( default=_UpperCamelCase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) SCREAMING_SNAKE_CASE__ :bool = field(default=_UpperCamelCase , metadata={"help": "whether to use adafactor"} ) SCREAMING_SNAKE_CASE__ :Optional[float] = field( default=_UpperCamelCase , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) SCREAMING_SNAKE_CASE__ :Optional[float] = field( default=_UpperCamelCase , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) SCREAMING_SNAKE_CASE__ :Optional[float] = field(default=_UpperCamelCase , metadata={"help": "Dropout probability. Goes into model.config."} ) SCREAMING_SNAKE_CASE__ :Optional[float] = field( default=_UpperCamelCase , metadata={"help": "Attention dropout probability. Goes into model.config."} ) SCREAMING_SNAKE_CASE__ :Optional[str] = field( default="linear" , metadata={"help": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" with open(lowercase_ ) as metadata_file: _UpperCamelCase : Dict = json.load(lowercase_ ) _UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"] # Load the entity vocab file _UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ ) # add an entry for [MASK2] _UpperCamelCase : Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) _UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f: _UpperCamelCase : Tuple = json.load(lowercase_ ) _UpperCamelCase : Optional[int] = "MLukeTokenizer" with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(lowercase_ ,lowercase_ ) _UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ ) # Initialize the embeddings of the special tokens _UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 ) _UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCamelCase : Optional[Any] = state_dict[bias_name] _UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.''' _UpperCamelCase : List[Any] = state_dict[prefix + matrix_name] _UpperCamelCase : str = state_dict[prefix + matrix_name] _UpperCamelCase : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCamelCase : Tuple = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCamelCase : int = state_dict["entity_predictions.bias"] _UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCamelCase : List[str] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCamelCase : Union[str, Any] = state_dict[key] else: _UpperCamelCase : Dict = state_dict[key] _UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ ) if set(lowercase_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowercase_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" ) _UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." _UpperCamelCase : Optional[Any] = (0, 9) _UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : List[str] = model(**lowercase_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 33, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCamelCase : Tuple = torch.Size((1, 1, 768) ) _UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ) _UpperCamelCase : int = "Tokyo is the capital of <mask>." 
_UpperCamelCase : List[Any] = (24, 30) _UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" ) _UpperCamelCase : Optional[Any] = model(**lowercase_ ) _UpperCamelCase : int = encoding["input_ids"][0].tolist() _UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase_ ) _UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() _UpperCamelCase : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase_ ) ) model.save_pretrained(lowercase_ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"] _UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )] _UpperCamelCase : List[str] = {} for entry in data: _UpperCamelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCamelCase : Dict = entity_id break _UpperCamelCase : Dict = F'''{language}:{entity_name}''' _UpperCamelCase : str = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
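# --- Added example invocation (the script and file names are placeholders; the flags
# match the argparse definitions above) ---
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base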
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowercase__ ( lowercase_ ) -> int: """simple docstring""" if is_torch_version("<" ,"2.0.0" ) or not hasattr(lowercase_ ,"_dynamo" ): return False return isinstance(lowercase_ ,torch._dynamo.eval_frame.OptimizedModule ) def lowercase__ ( lowercase_ ,lowercase_ = True ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _UpperCamelCase : int = is_compiled_module(lowercase_ ) if is_compiled: _UpperCamelCase : Dict = model _UpperCamelCase : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Tuple = model.module if not keep_fpaa_wrapper: _UpperCamelCase : Tuple = getattr(lowercase_ ,"forward" ) _UpperCamelCase : str = model.__dict__.pop("_original_forward" ,lowercase_ ) if original_forward is not None: while hasattr(lowercase_ ,"__wrapped__" ): _UpperCamelCase : Optional[int] = forward.__wrapped__ if forward == original_forward: break _UpperCamelCase : int = forward if getattr(lowercase_ ,"_converted_to_transformer_engine" ,lowercase_ ): convert_model(lowercase_ ,to_transformer_engine=lowercase_ ) if is_compiled: _UpperCamelCase : Dict = model _UpperCamelCase : List[Any] = compiled_model return model def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" PartialState().wait_for_everyone() def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(lowercase_ ,lowercase_ ) elif PartialState().local_process_index == 0: torch.save(lowercase_ ,lowercase_ ) @contextmanager def lowercase__ ( **lowercase_ ) -> Optional[Any]: """simple docstring""" for key, value in kwargs.items(): _UpperCamelCase : Any = str(lowercase_ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowercase__ ( lowercase_ ) -> Any: """simple docstring""" if not hasattr(lowercase_ ,"__qualname__" ) and not hasattr(lowercase_ ,"__name__" ): _UpperCamelCase : Union[str, Any] = getattr(lowercase_ ,"__class__" ,lowercase_ ) if hasattr(lowercase_ ,"__qualname__" ): return obj.__qualname__ if hasattr(lowercase_ ,"__name__" ): return obj.__name__ return str(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for key, value in source.items(): if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Any = destination.setdefault(lowercase_ ,{} ) merge_dicts(lowercase_ ,lowercase_ ) else: _UpperCamelCase : Union[str, Any] = value return destination def lowercase__ ( lowercase_ = None ) -> bool: """simple docstring""" if port is None: _UpperCamelCase : Optional[int] = 29_500 with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a ) }
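For orientation, a minimal standalone sketch of what the metric above delegates to: it simply forwards to nltk's corpus_gleu. The token lists are made up for illustration.

from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
# references come first, then hypotheses, mirroring the compute() wrapper above
print(round(gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4), 2))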
310
1
"""simple docstring""" from __future__ import annotations def lowercase__ ( lowercase_ ) -> int: """simple docstring""" for i in range(1 ,len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 ,len(lowercase_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 ,len(lowercase_ ) ): for j in range(1 ,len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
310
"""simple docstring""" from __future__ import annotations from math import pi def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
310
1
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=1e-12 ) -> Tuple: """simple docstring""" _UpperCamelCase : List[str] = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(lowercase_ ,axis=1 ) ,a_min=lowercase_ ) ).T _UpperCamelCase : int = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(lowercase_ ,axis=1 ) ,a_min=lowercase_ ) ).T return jnp.matmul(lowercase_ ,norm_emb_a.T ) class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :CLIPConfig SCREAMING_SNAKE_CASE__ :jnp.dtype = jnp.floataa def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Any = FlaxCLIPVisionModule(self.config.vision_config ) _UpperCamelCase : Dict = nn.Dense(self.config.projection_dim , use_bias=__a , dtype=self.dtype ) _UpperCamelCase : Tuple = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) _UpperCamelCase : Tuple = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) _UpperCamelCase : Union[str, Any] = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) _UpperCamelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self : Tuple , __a : str ) -> str: _UpperCamelCase : Union[str, Any] = self.vision_model(__a )[1] _UpperCamelCase : Optional[int] = self.visual_projection(__a ) _UpperCamelCase : Any = jax_cosine_distance(__a , self.special_care_embeds ) _UpperCamelCase : List[str] = jax_cosine_distance(__a , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs _UpperCamelCase : int = 0.0 _UpperCamelCase : str = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment _UpperCamelCase : Any = jnp.round(__a , 3 ) _UpperCamelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__a ) # Use a lower threshold if an image has any special care concept _UpperCamelCase : Optional[int] = is_special_care * 0.01 _UpperCamelCase : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment _UpperCamelCase : List[str] = jnp.round(__a , 3 ) _UpperCamelCase : Union[str, Any] = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[int] = CLIPConfig SCREAMING_SNAKE_CASE__ :Dict = "clip_input" SCREAMING_SNAKE_CASE__ :Union[str, Any] = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Dict , __a : CLIPConfig , __a : Optional[Tuple] = None , __a : int = 0 , __a : jnp.dtype = jnp.floataa , __a : bool = True , **__a : Optional[int] , ) -> Tuple: if input_shape is None: _UpperCamelCase : Optional[Any] = (1, 224, 224, 3) _UpperCamelCase : List[str] = self.module_class(config=__a , dtype=__a , **__a ) super().__init__(__a , __a , input_shape=__a , seed=__a , dtype=__a , _do_init=_do_init ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : jax.random.KeyArray , __a : Tuple , __a : FrozenDict = None ) -> FrozenDict: # init input tensor _UpperCamelCase : Optional[int] = jax.random.normal(__a , __a ) _UpperCamelCase, _UpperCamelCase : Tuple = jax.random.split(__a ) _UpperCamelCase : 
Optional[int] = {"params": params_rng, "dropout": dropout_rng} _UpperCamelCase : Optional[int] = self.module.init(__a , __a )["params"] return random_params def __call__( self : Tuple , __a : List[Any] , __a : dict = None , ) -> str: _UpperCamelCase : Tuple = jnp.transpose(__a , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__a , dtype=jnp.floataa ) , rngs={} , )
310
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if "://" in dataset_path: _UpperCamelCase : List[Any] = dataset_path.split("://" )[1] return dataset_path def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ ) def lowercase__ ( ) -> None: """simple docstring""" if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase : Dict = None _UpperCamelCase : str = None _UpperCamelCase : str = threading.Lock()
310
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup lowerCamelCase__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" def lowercase__ ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]: """simple docstring""" _UpperCamelCase : Optional[Any] = BeautifulSoup(requests.get(url + location ).content ,"html.parser" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div" ,attrs={"data-tn-component": "organicJob"} ): _UpperCamelCase : Tuple = job.find("a" ,attrs={"data-tn-element": "jobTitle"} ).text.strip() _UpperCamelCase : str = job.find("span" ,{"class": "company"} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("Bangalore"), 1): print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
310
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
310
1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[str] = BioGptTokenizer SCREAMING_SNAKE_CASE__ :Optional[int] = False def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : int = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] _UpperCamelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : Union[str, Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""] _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(__a ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[str] ) -> List[Any]: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: _UpperCamelCase : Any = BioGptTokenizer(self.vocab_file , self.merges_file ) _UpperCamelCase : Dict = "lower" _UpperCamelCase : Optional[Any] = ["low", "er</w>"] _UpperCamelCase : List[Any] = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : Dict = tokens + ["<unk>"] _UpperCamelCase : List[Any] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) @slow def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: _UpperCamelCase : Optional[int] = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) _UpperCamelCase : Any = tokenizer.encode("sequence builders" , add_special_tokens=__a ) _UpperCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) _UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a ) _UpperCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
310
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100""" lowerCamelCase__ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowerCamelCase__ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowerCamelCase__ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
310
1
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , __a : Tuple , __a : List[Any]=7 , __a : Optional[int]=3 , __a : str=18 , __a : Tuple=30 , __a : str=400 , __a : Optional[int]=True , __a : Optional[int]=None , __a : Optional[Any]=True , __a : List[str]=[0.5, 0.5, 0.5] , __a : Optional[Any]=[0.5, 0.5, 0.5] , ) -> Any: _UpperCamelCase : Union[str, Any] = size if size is not None else {"height": 18, "width": 18} _UpperCamelCase : Any = parent _UpperCamelCase : Union[str, Any] = batch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : Optional[Any] = image_size _UpperCamelCase : int = min_resolution _UpperCamelCase : Union[str, Any] = max_resolution _UpperCamelCase : Optional[Any] = do_resize _UpperCamelCase : Optional[int] = size _UpperCamelCase : int = do_normalize _UpperCamelCase : Optional[int] = image_mean _UpperCamelCase : List[Any] = image_std def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = DPTImageProcessor if is_vision_available() else None def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: _UpperCamelCase : Union[str, Any] = DPTImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: _UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , "image_mean" ) ) self.assertTrue(hasattr(__a , "image_std" ) ) self.assertTrue(hasattr(__a , "do_normalize" ) ) self.assertTrue(hasattr(__a , "do_resize" ) ) self.assertTrue(hasattr(__a , "size" ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: _UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) _UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: # Initialize image_processing _UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input _UpperCamelCase : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched _UpperCamelCase : Optional[Any] = 
image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: # Initialize image_processing _UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input _UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched _UpperCamelCase : List[Any] = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __SCREAMING_SNAKE_CASE ( self : int ) -> int: # Initialize image_processing _UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input _UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched _UpperCamelCase : int = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
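The call pattern those tests exercise, as a hedged standalone sketch; the 18x18 size mirrors the tester defaults, and any RGB images would do.

# from transformers import DPTImageProcessor
# processor = DPTImageProcessor(size={"height": 18, "width": 18})
# batch = processor(images, return_tensors="pt")
# batch.pixel_values.shape  # -> (batch_size, 3, 18, 18)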
310
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
1
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = "T5Config" class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[str] = "mt5" SCREAMING_SNAKE_CASE__ :Dict = MTaConfig class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = "mt5" SCREAMING_SNAKE_CASE__ :int = MTaConfig class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = "mt5" SCREAMING_SNAKE_CASE__ :int = MTaConfig
310
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
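The canonical call the slow tests above cover, as a hedged sketch (it downloads the DETR weights on first use).

# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     threshold=0.9985,
# )  # -> the two high-confidence "cat" detections asserted above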
310
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a ) }
310
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase__ = {"UserAgent": UserAgent().random} def lowercase__ ( lowercase_ ) -> dict: """simple docstring""" _UpperCamelCase : str = script.contents[0] _UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , __a : str ) -> Tuple: _UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/''' _UpperCamelCase : Optional[Any] = self.get_json() def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict: _UpperCamelCase : int = requests.get(self.url , headers=__a ).text _UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : List[Any] ) -> str: return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : str ) -> str: return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: return self.user_data["username"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["full_name"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self.user_data["biography"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["business_email"] @property def __SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["external_url"] @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool: return self.user_data["is_verified"] @property def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: return self.user_data["is_private"] def lowercase__ ( lowercase_ = "github" ) -> None: """simple docstring""" import os if os.environ.get("CI" ): return # test failing on GitHub Actions _UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data ,lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = InstagramUser("github") print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
310
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : str = SwinConfig( embed_dim=192 ,depths=(2, 2, 18, 2) ,num_heads=(6, 12, 24, 48) ,window_size=12 ,out_features=["stage2", "stage3", "stage4"] ,) _UpperCamelCase : Optional[int] = DetaConfig( backbone_config=lowercase_ ,num_queries=900 ,encoder_ffn_dim=2_048 ,decoder_ffn_dim=2_048 ,num_feature_levels=5 ,assign_first_stage=lowercase_ ,with_box_refine=lowercase_ ,two_stage=lowercase_ ,) # set labels _UpperCamelCase : Union[str, Any] = "huggingface/label-files" if "o365" in model_name: _UpperCamelCase : int = 366 _UpperCamelCase : Optional[int] = "object365-id2label.json" else: _UpperCamelCase : Any = 91 _UpperCamelCase : Dict = "coco-detection-id2label.json" _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Dict = json.load(open(cached_download(hf_hub_url(lowercase_ ,lowercase_ ,repo_type="dataset" ) ) ,"r" ) ) _UpperCamelCase : Dict = {int(lowercase_ ): v for k, v in idalabel.items()} _UpperCamelCase : Optional[Any] = idalabel _UpperCamelCase : int = {v: k for k, v in idalabel.items()} return config def lowercase__ ( lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : str = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") ) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', 
F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") ) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") ) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") ) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") ) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") ) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') ) 
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def lowercase__ ( 
lowercase_ ,lowercase_ ,lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = dct.pop(lowercase_ ) _UpperCamelCase : Optional[int] = val def lowercase__ ( lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _UpperCamelCase : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _UpperCamelCase : str = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) _UpperCamelCase : Optional[int] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCamelCase : List[str] = in_proj_weight[:dim, :] _UpperCamelCase : Optional[int] = in_proj_bias[: dim] _UpperCamelCase : List[Any] = in_proj_weight[ dim : dim * 2, : ] _UpperCamelCase : Tuple = in_proj_bias[ dim : dim * 2 ] _UpperCamelCase : List[Any] = in_proj_weight[ -dim :, : ] _UpperCamelCase : List[Any] = in_proj_bias[-dim :] # fmt: on def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : Union[str, Any] = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention _UpperCamelCase : Optional[Any] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) _UpperCamelCase : Union[str, Any] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCamelCase : List[str] = in_proj_weight[:hidden_size, :] _UpperCamelCase : List[Any] = in_proj_bias[:hidden_size] _UpperCamelCase : Optional[Any] = in_proj_weight[ hidden_size : hidden_size * 2, : ] _UpperCamelCase : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2] _UpperCamelCase : Optional[int] = in_proj_weight[-hidden_size:, :] _UpperCamelCase : List[Any] = in_proj_bias[-hidden_size:] def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCamelCase : Tuple = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ) return im @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : List[Any] = get_deta_config(lowercase_ ) # load original state dict if model_name == "deta-swin-large": _UpperCamelCase : Dict = hf_hub_download(repo_id="nielsr/deta-checkpoints" ,filename="adet_swin_ft.pth" ) elif model_name == "deta-swin-large-o365": _UpperCamelCase : Dict = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" ,filename="deta_swin_pt_o365.pth" ) else: raise ValueError(F'''Model name {model_name} not supported''' ) _UpperCamelCase : List[Any] = torch.load(lowercase_ ,map_location="cpu" )["model"] # original state dict for name, param in state_dict.items(): print(lowercase_ ,param.shape ) # rename keys _UpperCamelCase : List[Any] = create_rename_keys(lowercase_ ) for src, dest in rename_keys: rename_key(lowercase_ ,lowercase_ ,lowercase_ ) read_in_swin_q_k_v(lowercase_ ,config.backbone_config ) read_in_decoder_q_k_v(lowercase_ ,lowercase_ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: _UpperCamelCase : 
Optional[Any] = state_dict.pop(lowercase_ ) _UpperCamelCase : List[str] = val if "input_proj" in key: _UpperCamelCase : List[Any] = state_dict.pop(lowercase_ ) _UpperCamelCase : str = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: _UpperCamelCase : Union[str, Any] = state_dict.pop(lowercase_ ) _UpperCamelCase : Tuple = val # finally, create HuggingFace model and load state dict _UpperCamelCase : List[str] = DetaForObjectDetection(lowercase_ ) model.load_state_dict(lowercase_ ) model.eval() _UpperCamelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu" model.to(lowercase_ ) # load image processor _UpperCamelCase : str = DetaImageProcessor(format="coco_detection" ) # verify our conversion on image _UpperCamelCase : Optional[Any] = prepare_img() _UpperCamelCase : Optional[Any] = processor(images=lowercase_ ,return_tensors="pt" ) _UpperCamelCase : str = encoding["pixel_values"] _UpperCamelCase : Tuple = model(pixel_values.to(lowercase_ ) ) # verify logits print("Logits:" ,outputs.logits[0, :3, :3] ) print("Boxes:" ,outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": _UpperCamelCase : str = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) _UpperCamelCase : Any = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": _UpperCamelCase : Dict = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) _UpperCamelCase : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] ,expected_logits.to(lowercase_ ) ,atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,expected_boxes.to(lowercase_ ) ,atol=1e-4 ) print("Everything ok!" ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) model.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) # Push to hub if push_to_hub: print("Pushing model and processor to hub..." ) model.push_to_hub(F'''jozhang97/{model_name}''' ) processor.push_to_hub(F'''jozhang97/{model_name}''' ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-swin-large", choices=["deta-swin-large", "deta-swin-large-o365"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCamelCase__ = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
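# Example invocation of the DETA conversion script above. The script filename is
# hypothetical (whatever this file is saved as); the flags match the argparse
# definition directly above.
#
#   python convert_deta_checkpoint.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path /tmp/deta-swin-large \
#       --push_to_hub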
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[Any] = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : Any = _sin / (2 * q_factor) _UpperCamelCase : str = (1 - _cos) / 2 _UpperCamelCase : Any = 1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : List[str] = -2 * _cos _UpperCamelCase : Tuple = 1 - alpha _UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : List[str] = tau * frequency / samplerate _UpperCamelCase : str = sin(lowercase_ ) _UpperCamelCase : Optional[Any] = cos(lowercase_ ) _UpperCamelCase : Dict = _sin / (2 * q_factor) _UpperCamelCase : List[Any] = (1 + _cos) / 2 _UpperCamelCase : Optional[int] = -1 - _cos _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : str = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : Tuple = tau * frequency / samplerate _UpperCamelCase : Optional[int] = sin(lowercase_ ) _UpperCamelCase : Dict = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Dict = _sin / 2 _UpperCamelCase : int = 0 _UpperCamelCase : str = -ba _UpperCamelCase : List[str] = 1 + alpha _UpperCamelCase : Optional[int] = -2 * _cos _UpperCamelCase : Optional[Any] = 1 - alpha _UpperCamelCase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" _UpperCamelCase : str = tau * frequency / samplerate _UpperCamelCase : Optional[Any] = sin(lowercase_ ) _UpperCamelCase : Optional[int] = cos(lowercase_ ) _UpperCamelCase : int = _sin / (2 * q_factor) _UpperCamelCase : List[str] = 1 - alpha _UpperCamelCase : int = -2 * _cos _UpperCamelCase : Union[str, Any] = 1 + alpha _UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : int = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : List[Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : Optional[int] = 10 ** (gain_db / 40) _UpperCamelCase : str = 1 + alpha * big_a _UpperCamelCase : Union[str, Any] = -2 * _cos _UpperCamelCase : Optional[int] = 1 - alpha * big_a _UpperCamelCase : int = 1 + alpha / big_a _UpperCamelCase : Optional[Any] = -2 * _cos _UpperCamelCase : Any = 1 - alpha / big_a _UpperCamelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Union[str, Any] = tau * frequency / samplerate _UpperCamelCase : Any = sin(lowercase_ ) _UpperCamelCase : Union[str, Any] = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) 
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : Any = big_a * (pmc + aaa) _UpperCamelCase : Dict = 2 * big_a * mpc _UpperCamelCase : str = big_a * (pmc - aaa) _UpperCamelCase : Dict = ppmc + aaa _UpperCamelCase : List[Any] = -2 * pmpc _UpperCamelCase : Dict = ppmc - aaa _UpperCamelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter: """simple docstring""" _UpperCamelCase : Optional[int] = tau * frequency / samplerate _UpperCamelCase : int = sin(lowercase_ ) _UpperCamelCase : Any = cos(lowercase_ ) _UpperCamelCase : str = _sin / (2 * q_factor) _UpperCamelCase : str = 10 ** (gain_db / 40) _UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha _UpperCamelCase : List[Any] = big_a * (ppmc + aaa) _UpperCamelCase : Dict = -2 * big_a * pmpc _UpperCamelCase : Dict = big_a * (ppmc - aaa) _UpperCamelCase : Optional[Any] = pmc + aaa _UpperCamelCase : Any = 2 * mpc _UpperCamelCase : Any = pmc - aaa _UpperCamelCase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
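# Usage sketch for the filters above. It assumes the IIRFilter class imported
# from audio_filters.iir_filter exposes a per-sample `process(sample: float) -> float`
# method; adapt if the class API differs.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    print([lowpass.process(sample) for sample in samples])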
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowerCamelCase__ = { "vocab_file": { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt", }, "tokenizer_file": { "unc-nlp/lxmert-base-uncased": ( "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json" ), }, } lowerCamelCase__ = { "unc-nlp/lxmert-base-uncased": 512, } lowerCamelCase__ = { "unc-nlp/lxmert-base-uncased": {"do_lower_case": True}, } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ :Dict = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ :Any = LxmertTokenizer def __init__( self : List[Any] , __a : Optional[Any]=None , __a : Dict=None , __a : List[str]=True , __a : Optional[Any]="[UNK]" , __a : List[str]="[SEP]" , __a : Optional[Any]="[PAD]" , __a : Any="[CLS]" , __a : Any="[MASK]" , __a : Tuple=True , __a : str=None , **__a : Tuple , ) -> List[Any]: super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) _UpperCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __a ) != do_lower_case or normalizer_state.get("strip_accents" , __a ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __a ) != tokenize_chinese_chars ): _UpperCamelCase : Tuple = getattr(__a , normalizer_state.pop("type" ) ) _UpperCamelCase : Optional[Any] = do_lower_case _UpperCamelCase : Tuple = strip_accents _UpperCamelCase : str = tokenize_chinese_chars _UpperCamelCase : int = normalizer_class(**__a ) _UpperCamelCase : Tuple = do_lower_case def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str , __a : Optional[int]=None ) -> List[Any]: _UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __SCREAMING_SNAKE_CASE ( self : int , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: _UpperCamelCase : Dict = [self.sep_token_id] _UpperCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[str] = None ) -> Tuple[str]: _UpperCamelCase : Dict = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
"""simple docstring""" import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC lowerCamelCase__ = parse(importlib.metadata.version("torch")) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) _UpperCamelCase : Union[str, Any] = STR_OPERATION_TO_FUNC[operation] if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Tuple = parse(importlib.metadata.version(lowercase_ ) ) return operation(lowercase_ ,parse(lowercase_ ) ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]: """simple docstring""" return compare_versions(lowercase_ ,lowercase_ ,lowercase_ )
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = prime_factors(lowercase_ ) if is_square_free(lowercase_ ): return -1 if len(lowercase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase__ = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "TFViTMAEForPreTraining", "TFViTMAEModel", "TFViTMAEPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ :Dict = True SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True} SCREAMING_SNAKE_CASE__ :Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) ) _UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : str = {"unk_token": "<unk>"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple: _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Union[str, Any] = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: _UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase : Optional[Any] = "lower newer" _UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) _UpperCamelCase : str = tokens + [tokenizer.unk_token] _UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = "lower newer" # Testing tokenization _UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens _UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) _UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with 
special tokens _UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a ) _UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a ) _UpperCamelCase : List[str] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token _UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token] _UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input _UpperCamelCase : Optional[int] = "This is a simple input" _UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Dict = ("This is a simple input", "This is a pair") _UpperCamelCase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: _UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _UpperCamelCase : Union[str, Any] = "This is a simple input" _UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"] _UpperCamelCase : str = ("This is a simple input", "This is a pair") _UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id _UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" ) _UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) _UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" ) _UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: _UpperCamelCase : Any = "$$$" _UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) _UpperCamelCase : int = "This is a simple input" _UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"] _UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id _UpperCamelCase : str = tokenizer(__a ) _UpperCamelCase : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids ) _UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase : Tuple = "Encode this." _UpperCamelCase : List[str] = "This one too please." 
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a ) encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a ) _UpperCamelCase : int = tokenizer.encode_plus( __a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , ) _UpperCamelCase : str = encoded_sequence_dict["input_ids"] _UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__a ) , len(__a ) ) _UpperCamelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__a ) ] _UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__a , __a ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : int ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Any = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) _UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" ) _UpperCamelCase : Optional[Any] = tokenizer.encode( __a , ) self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: _UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a ) _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : Union[str, Any] = tokenizer.encode( __a , ) # Same as above self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a ) _UpperCamelCase : List[str] = "bos" _UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"] _UpperCamelCase : List[Any] = "A photo of a cat" _UpperCamelCase : List[Any] = tokenizer.encode( __a , ) # We changed the bos token self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _UpperCamelCase : Tuple = tokenizer.encode( __a , ) self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
"""simple docstring""" def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : Tuple = abs(lowercase_ ) _UpperCamelCase : Any = 0 while n > 0: res += n % 10 n //= 10 return res def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : int = abs(lowercase_ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def lowercase__ ( lowercase_ ) -> int: """simple docstring""" return sum(int(lowercase_ ) for c in str(abs(lowercase_ ) ) ) def lowercase__ ( ) -> None: """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowercase_ ,lowercase_ ) -> None: _UpperCamelCase : Union[str, Any] = F'''{func.__name__}({value})''' _UpperCamelCase : Dict = timeit(F'''__main__.{call}''' ,setup="import __main__" ) print(F'''{call:56} = {func(lowercase_ )} -- {timing:.4f} seconds''' ) for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(lowercase_ ,lowercase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = load_tool("text-question-answering" ) self.tool.setup() _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" ) self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(".") def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : Any = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " F'''{test_file} instead.''' ) _UpperCamelCase : Dict = components[-1] if not test_fn.endswith("py" ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith("test_modeling_" ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) _UpperCamelCase : str = components[:-1] + [test_fn.replace(".py" ,"" )] _UpperCamelCase : Union[str, Any] = ".".join(lowercase_ ) return test_module_path def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : Any = get_module_path(lowercase_ ) _UpperCamelCase : Optional[Any] = importlib.import_module(lowercase_ ) return test_module def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Any = [] _UpperCamelCase : Tuple = get_test_module(lowercase_ ) for attr in dir(lowercase_ ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(lowercase_ ,lowercase_ ) ) # sort with class names return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ ) def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] _UpperCamelCase : int = get_test_module(lowercase_ ) for attr in dir(lowercase_ ): _UpperCamelCase : List[Any] = getattr(lowercase_ ,lowercase_ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). _UpperCamelCase : List[str] = getattr(lowercase_ ,"all_model_classes" ,[] ) if len(lowercase_ ) > 0: test_classes.append(lowercase_ ) # sort with class names return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ ) def lowercase__ ( lowercase_ ) -> Tuple: """simple docstring""" _UpperCamelCase : Any = get_test_classes(lowercase_ ) _UpperCamelCase : Any = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ ) def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = test_class() if hasattr(lowercase_ ,"setUp" ): test.setUp() _UpperCamelCase : Optional[Any] = None if hasattr(lowercase_ ,"model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: _UpperCamelCase : List[str] = test.model_tester.__class__ return model_tester def lowercase__ ( lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = get_test_classes(lowercase_ ) _UpperCamelCase : Optional[Any] = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowercase_ ) # sort with class names return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : int = get_test_classes_for_model(lowercase_ ,lowercase_ ) _UpperCamelCase : Dict = [] for test_class in test_classes: _UpperCamelCase : int = get_model_tester_from_test_class(lowercase_ ) if tester_class is not None: tester_classes.append(lowercase_ ) # sort with class names return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ ) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : str = get_test_classes(lowercase_ ) _UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes} return test_tester_mapping def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : Optional[int] = get_model_classes(lowercase_ ) _UpperCamelCase : Optional[int] = { model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes } return model_test_mapping def lowercase__ ( lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : Union[str, Any] = get_model_classes(lowercase_ ) _UpperCamelCase : Any = { model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes } return model_to_tester_mapping def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if isinstance(lowercase_ ,lowercase_ ): return o elif isinstance(lowercase_ ,lowercase_ ): return o.__name__ elif isinstance(lowercase_ ,(list, tuple) ): return [to_json(lowercase_ ) for x in o] elif isinstance(lowercase_ ,lowercase_ ): return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()} else: return o
"""simple docstring""" lowerCamelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict: """simple docstring""" _UpperCamelCase : Tuple = [False] * len(lowercase_ ) _UpperCamelCase : Dict = [s] _UpperCamelCase : List[str] = True while queue: _UpperCamelCase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase_ ) _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = u return visited[t] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : int = [-1] * (len(lowercase_ )) _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ): _UpperCamelCase : int = float("Inf" ) _UpperCamelCase : Optional[Any] = sink while s != source: # Find the minimum value in select path _UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] ) _UpperCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _UpperCamelCase : Union[str, Any] = sink while v != source: _UpperCamelCase : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase : Dict = parent[v] for i in range(len(lowercase_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" import warnings from functools import wraps from typing import Callable def lowercase__ ( lowercase_ ) -> Callable: """simple docstring""" @wraps(lowercase_ ) def _inner_fn(*lowercase_ ,**lowercase_ ): warnings.warn( (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') ,lowercase_ ,) return fn(*lowercase_ ,**lowercase_ ) return _inner_fn
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = ["pixel_values"] def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256} _UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" ) _UpperCamelCase : str = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : int = do_center_crop _UpperCamelCase : int = crop_size _UpperCamelCase : Optional[Any] = resample _UpperCamelCase : Dict = do_rescale _UpperCamelCase : Any = rescale_factor _UpperCamelCase : Any = offset _UpperCamelCase : Union[str, Any] = do_normalize _UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" in size: _UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: _UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]: _UpperCamelCase : Any = image.astype(np.floataa ) if offset: _UpperCamelCase : Dict = image - (scale / 2) return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = to_numpy_array(__a ) if do_resize: _UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a ) if do_center_crop: _UpperCamelCase : Dict = self.center_crop(__a , size=__a ) if do_rescale: _UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a ) if do_normalize: _UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a ) _UpperCamelCase : str = to_channel_dimension_format(__a , __a ) return image def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: _UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = offset if offset is not None else self.offset _UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) _UpperCamelCase : Union[str, Any] = make_batched(__a ) _UpperCamelCase : Optional[Any] = [ [ self._preprocess_image( image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , ) for img in video ] for video in videos ] _UpperCamelCase : List[Any] = {"pixel_values": videos} return BatchFeature(data=__a , tensor_type=__a )
"""simple docstring""" def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" _UpperCamelCase : Tuple = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." 
) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) 
-> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." 
,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
310
1
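A minimal runnable sketch of the url-to-filename hashing used by the cache helpers in the row above, assuming the obfuscated `shaaaa` import stands for `hashlib.sha256` (likewise `cva` reads as `cv2`); the function name and parameters below are illustrative, not from the source:

from hashlib import sha256
from typing import Optional


def url_to_cache_filename(url: str, etag: Optional[str] = None) -> str:
    # Hash the URL for a stable on-disk filename; append a hash of the ETag
    # so a new upstream version of the same URL gets a distinct cache entry.
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    return filename


print(url_to_cache_filename("https://cdn.huggingface.co/bert/config.yaml", etag="abc123"))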
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
310
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
310
1
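The FSNER module in the row above scores query tokens against support tokens with a temperature-scaled cosine similarity followed by a softmax. A self-contained sketch of that step, with hypothetical names and random tensors standing in for the BERT hidden states the real module uses:

import torch

cos = torch.nn.CosineSimilarity(dim=3, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)


def token_similarity(q: torch.Tensor, s: torch.Tensor, temperature: float = 1.0) -> torch.Tensor:
    # Cosine similarity over the hidden dimension (dim 3), scaled by a
    # temperature and normalized with a softmax across dim 1.
    return softmax(temperature * cos(q, s))


q = torch.randn(2, 4, 6, 8)  # (batch, n_query, n_support, hidden) -- illustrative shapes
s = torch.randn(2, 4, 6, 8)
print(token_similarity(q, s).shape)  # torch.Size([2, 4, 6])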
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = ["pixel_values"] def __init__( self : Any , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : Dict[str, int] = None , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Union[str, Any] = size if size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[int] = get_size_dict(__a ) _UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Optional[int] = get_size_dict(__a , default_to_square=__a , param_name="crop_size" ) _UpperCamelCase : List[str] = do_resize _UpperCamelCase : Union[str, Any] = do_rescale _UpperCamelCase : List[Any] = do_normalize _UpperCamelCase : int = do_center_crop _UpperCamelCase : str = crop_size _UpperCamelCase : List[str] = size _UpperCamelCase : Tuple = resample _UpperCamelCase : int = rescale_factor _UpperCamelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ) -> np.ndarray: _UpperCamelCase : Union[str, Any] = get_size_dict(__a ) if "shortest_edge" in size: _UpperCamelCase : int = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCamelCase : str = (size["height"], size["width"]) else: raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ) -> np.ndarray: _UpperCamelCase : List[Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple ) -> np.ndarray: return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : Optional[Any] , ) -> BatchFeature: _UpperCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : Dict = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Dict = get_size_dict(__a , param_name="crop_size" , default_to_square=__a ) _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : int = image_std if image_std is not None else self.image_std _UpperCamelCase : Optional[Any] = size if size is not None else self.size _UpperCamelCase : str = get_size_dict(__a ) if not is_batched(__a ): _UpperCamelCase : Any = [images] if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. _UpperCamelCase : int = [to_numpy_array(__a ) for image in images] if do_resize: _UpperCamelCase : Optional[Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: _UpperCamelCase : Any = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: _UpperCamelCase : Union[str, Any] = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: _UpperCamelCase : Tuple = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] _UpperCamelCase : Any = [to_channel_dimension_format(__a , __a ) for image in images] _UpperCamelCase : int = {"pixel_values": images} return BatchFeature(data=__a , tensor_type=__a )
310
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
310
1
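For reference, the image processor earlier in this row applies its transforms in a fixed order: resize, center crop, rescale, normalize, then HWC-to-CHW conversion into "pixel_values". A hedged NumPy sketch of the crop/rescale/normalize/transpose portion (resampling omitted for brevity; the mean/std values and the helper name are assumptions mirroring the IMAGENET_DEFAULT_* imports):

import numpy as np

IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])


def preprocess(image: np.ndarray, crop: int = 224, scale: float = 1 / 255) -> np.ndarray:
    h, w, _ = image.shape
    top, left = (h - crop) // 2, (w - crop) // 2
    image = image[top : top + crop, left : left + crop]  # center crop
    image = image * scale  # rescale to [0, 1]
    image = (image - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD  # normalize
    return np.transpose(image, (2, 0, 1))  # HWC -> CHW, i.e. "pixel_values" layout


img = np.random.randint(0, 256, (256, 256, 3)).astype("float32")
print(preprocess(img).shape)  # (3, 224, 224)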