code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(lowerCAmelCase , lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: _lowerCAmelCase = s_dict.pop(lowerCAmelCase ) elif "subsample" in key: _lowerCAmelCase = s_dict.pop(lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = emb.weight.shape _lowerCAmelCase = nn.Linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase ) _lowerCAmelCase = emb.weight.data return lin_layer def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" ) _lowerCAmelCase = mam_aaa["""args"""] _lowerCAmelCase = mam_aaa["""model"""] _lowerCAmelCase = state_dict["""decoder.output_projection.weight"""] remove_ignore_keys_(lowerCAmelCase ) rename_keys(lowerCAmelCase ) _lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""].shape[0] _lowerCAmelCase = args.share_decoder_input_output_embed _lowerCAmelCase = [int(lowerCAmelCase ) for i in args.conv_kernel_sizes.split(""",""" )] _lowerCAmelCase = SpeechaTextConfig( vocab_size=lowerCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , 
decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(lowerCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=lowerCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=lowerCAmelCase , num_beams=5 , max_length=2_00 , use_cache=lowerCAmelCase , decoder_start_token_id=2 , early_stopping=lowerCAmelCase , ) _lowerCAmelCase = SpeechaTextForConditionalGeneration(lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = model.model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) if len(lowerCAmelCase ) > 0 and not set(lowerCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: _lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _lowerCAmelCase = lm_head_weights model.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": A__ : List[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') A__ : Dict =parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
70
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : Tuple ={ '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int =['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =[ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : List[Any] ={ '''configuration_clap''': [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapAudioConfig''', '''ClapConfig''', '''ClapTextConfig''', ], '''processing_clap''': ['''ClapProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =[ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapModel''', '''ClapPreTrainedModel''', '''ClapTextModel''', '''ClapTextModelWithProjection''', '''ClapAudioModel''', '''ClapAudioModelWithProjection''', ] A__ : int =['''ClapFeatureExtractor'''] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys A__ : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [0 for i in range(r + 1 )] # nc0 = 1 _lowerCAmelCase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _lowerCAmelCase = min(lowerCAmelCase , lowerCAmelCase ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
70
1
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) A__ : List[Any] =pytest.mark.integration @pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" inspect_dataset(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = path + """.py""" assert script_name in os.listdir(lowerCAmelCase ) assert "__pycache__" not in os.listdir(lowerCAmelCase ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" , ["""accuracy"""] ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" inspect_metric(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = path + """.py""" assert script_name in os.listdir(lowerCAmelCase ) assert "__pycache__" not in os.listdir(lowerCAmelCase ) @pytest.mark.parametrize( """path, config_name, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = get_dataset_config_info(lowerCAmelCase , config_name=lowerCAmelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with pytest.raises(lowerCAmelCase ): get_dataset_config_info(lowerCAmelCase , config_name=lowerCAmelCase ) @pytest.mark.parametrize( """path, 
expected""" , [ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = get_dataset_config_names(lowerCAmelCase ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" , [ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = get_dataset_infos(lowerCAmelCase ) assert list(infos.keys() ) == expected_configs _lowerCAmelCase = expected_configs[0] assert expected_config in infos _lowerCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = get_dataset_infos(lowerCAmelCase ) assert expected_config in infos _lowerCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , 
lowerCAmelCase ): """simple docstring""" with pytest.raises(lowerCAmelCase ): get_dataset_split_names(lowerCAmelCase , config_name=lowerCAmelCase )
70
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class UpperCAmelCase ( datasets.BuilderConfig ): _lowercase: Optional[datasets.Features] = None class UpperCAmelCase ( datasets.ArrowBasedBuilder ): _lowercase: Tuple = PandasConfig def lowercase__ ( self : Optional[Any] ) -> str: return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self : List[str] , __snake_case : Dict ) -> int: if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__snake_case , (str, list, tuple) ): _lowerCAmelCase = data_files if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] _lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files] splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) ) return splits def lowercase__ ( self : List[Any] , __snake_case : pa.Table ) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema ) return pa_table def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> Any: for i, file 
in enumerate(itertools.chain.from_iterable(__snake_case ) ): with open(__snake_case , """rb""" ) as f: _lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(__snake_case ) ) yield i, self._cast_table(__snake_case )
70
1
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCAmelCase : def __init__( self : Dict , __snake_case : Tuple , __snake_case : int=2 , __snake_case : Optional[int]=True , __snake_case : int=False , __snake_case : Tuple=10 , __snake_case : Union[str, Any]=3 , __snake_case : Optional[Any]=32 * 8 , __snake_case : List[Any]=32 * 8 , __snake_case : int=4 , __snake_case : int=64 , ) -> str: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = is_training _lowerCAmelCase = use_auxiliary_loss _lowerCAmelCase = num_queries _lowerCAmelCase = num_channels _lowerCAmelCase = min_size _lowerCAmelCase = max_size _lowerCAmelCase = num_labels _lowerCAmelCase = hidden_dim _lowerCAmelCase = hidden_dim def lowercase__ ( self : Optional[int] ) -> List[Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __snake_case ) _lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case ) _lowerCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5 ).float() _lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long() _lowerCAmelCase = self.get_config() 
return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase__ ( self : Tuple ) -> Tuple: _lowerCAmelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCAmelCase = self.num_queries _lowerCAmelCase = self.num_labels _lowerCAmelCase = [1, 1, 1, 1] _lowerCAmelCase = self.num_channels _lowerCAmelCase = 64 _lowerCAmelCase = 1_28 _lowerCAmelCase = self.hidden_dim _lowerCAmelCase = self.hidden_dim _lowerCAmelCase = self.hidden_dim return config def lowercase__ ( self : Tuple ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowercase__ ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> int: _lowerCAmelCase = output.encoder_hidden_states _lowerCAmelCase = output.pixel_decoder_hidden_states _lowerCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__snake_case ) , config.decoder_layers ) def lowercase__ ( self : List[str] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Tuple , __snake_case : Dict=False ) -> Tuple: with torch.no_grad(): _lowerCAmelCase = MaskaFormerModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(pixel_values=__snake_case , pixel_mask=__snake_case ) _lowerCAmelCase = model(__snake_case , output_hidden_states=__snake_case ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) 
if output_hidden_states: self.check_output_hidden_state(__snake_case , __snake_case ) def lowercase__ ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Tuple , __snake_case : Any , __snake_case : List[Any] ) -> str: _lowerCAmelCase = MaskaFormerForUniversalSegmentation(config=__snake_case ) model.to(__snake_case ) model.eval() def comm_check_on_output(__snake_case : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCAmelCase = model(pixel_values=__snake_case , pixel_mask=__snake_case ) _lowerCAmelCase = model(__snake_case ) comm_check_on_output(__snake_case ) _lowerCAmelCase = model( pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case ) comm_check_on_output(__snake_case ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: int = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowercase: Union[str, Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowercase: str = False _lowercase: str = False _lowercase: Optional[Any] = False _lowercase: Optional[int] = False def lowercase__ ( self : 
List[Any] ) -> Optional[int]: _lowerCAmelCase = MaskaFormerModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] ) -> int: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case ) def lowercase__ ( self : Any ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def lowercase__ ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def lowercase__ ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def lowercase__ ( self : Optional[Any] ) -> Tuple: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase__ ( self : List[Any] ) -> str: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : List[Any] ) -> Tuple: pass def lowercase__ ( self : Optional[int] ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic 
_lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) @slow def lowercase__ ( self : str ) -> List[str]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCAmelCase = MaskaFormerModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = (self.model_tester.min_size,) * 2 _lowerCAmelCase = { """pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ), """mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ), """class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(), } _lowerCAmelCase = self.model_tester.get_config() _lowerCAmelCase = MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case ) _lowerCAmelCase = model(**__snake_case ) self.assertTrue(outputs.loss is not None ) def lowercase__ ( self : List[Any] ) -> Dict: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case ) def lowercase__ ( self : Tuple ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ).to(__snake_case ) _lowerCAmelCase = model(**__snake_case , output_attentions=__snake_case ) self.assertTrue(outputs.attentions is not None ) def lowercase__ ( self : str ) -> Tuple: if not self.model_tester.is_training: return _lowerCAmelCase = self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.train() _lowerCAmelCase = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss 
loss.backward() def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ).to(__snake_case ) model.train() _lowerCAmelCase = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ) _lowerCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__snake_case ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A__ : Dict =1e-4 def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class UpperCAmelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase__ ( self : int ) -> List[Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase__ ( self : List[str] ) -> Union[str, Any]: _lowerCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) _lowerCAmelCase = inputs["""pixel_values"""].shape # check size is 
divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) _lowerCAmelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) _lowerCAmelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) _lowerCAmelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) ) def lowercase__ ( self : int ) -> int: _lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval() _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) _lowerCAmelCase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) # masks_queries_logits _lowerCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _lowerCAmelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCAmelCase = 
torch.tensor(__snake_case ).to(__snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) # class_queries_logits _lowerCAmelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) ) def lowercase__ ( self : Any ) -> List[str]: _lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval() _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _lowerCAmelCase = inputs["""pixel_values"""].to(__snake_case ) _lowerCAmelCase = [el.to(__snake_case ) for el in inputs["""mask_labels"""]] _lowerCAmelCase = [el.to(__snake_case ) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) self.assertTrue(outputs.loss is not None )
70
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class UpperCAmelCase : def __init__( self : str , __snake_case : Any ) -> str: _lowerCAmelCase = str(id_ ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = [] _lowerCAmelCase = {} # {vertex:distance} def __lt__( self : List[str] , __snake_case : Union[str, Any] ) -> Any: return self.key < other.key def __repr__( self : Optional[Any] ) -> Optional[Any]: return self.id def lowercase__ ( self : Union[str, Any] , __snake_case : Tuple ) -> Optional[Any]: self.neighbors.append(__snake_case ) def lowercase__ ( self : Tuple , __snake_case : List[str] , __snake_case : Tuple ) -> Any: _lowerCAmelCase = weight def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , lowerCAmelCase ) graph[b - 1].add_edge(graph[a - 1] , lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for u in graph: _lowerCAmelCase = math.inf _lowerCAmelCase = None _lowerCAmelCase = 0 _lowerCAmelCase = graph[:] while q: _lowerCAmelCase = min(lowerCAmelCase ) q.remove(lowerCAmelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _lowerCAmelCase = u _lowerCAmelCase = u.edges[v.id] for i in range(1 , len(lowerCAmelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for u in graph: _lowerCAmelCase = math.inf _lowerCAmelCase = None _lowerCAmelCase = 0 _lowerCAmelCase = list(lowerCAmelCase ) hq.heapify(lowerCAmelCase ) while h: _lowerCAmelCase = hq.heappop(lowerCAmelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _lowerCAmelCase = u _lowerCAmelCase = u.edges[v.id] hq.heapify(lowerCAmelCase ) for i in 
range(1 , len(lowerCAmelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def UpperCamelCase__ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
70
1
"""Convert a fairseq Wav2Vec2-Conformer checkpoint into the 🤗 Transformers format.

Loads a fairseq checkpoint, remaps its parameter names onto the Transformers
Wav2Vec2-Conformer architecture, and saves the converted model (plus tokenizer /
processor for fine-tuned CTC checkpoints) to ``--pytorch_dump_folder_path``.
"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> Transformers parameter name.
# A "*" placeholder is later replaced by the encoder layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the Transformers model; every other mapped
# key is prefixed with "wav2vec2_conformer." below.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the attribute reached by walking the dotted `key` on `hf_pointer`.

    `weight_type` selects which tensor of the resolved module is written (e.g.
    "weight", "bias", "running_mean"); `None` means the pointer itself is a tensor.
    Raises ValueError when the destination shape does not match `value`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every parameter of `fairseq_model` into `hf_model` using MAPPING.

    Conv feature-extractor weights are delegated to `load_conv_layer`; anything
    that matches no mapping is collected and logged as unused.
    NOTE(review): `is_headless` is currently unused — kept for signature
    compatibility with the call site below.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv-feature-extractor tensor into `feature_extractor`.

    fairseq names look like "conv_layers.<layer_id>.<type_id>....": type 0 is the
    conv itself, type 2 is its (layer/group) norm. Mismatched shapes raise
    ValueError; unrecognized entries are appended to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq checkpoint's weights into the Transformers design
    and save the converted model to `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
70
"""Slow integration test pinning google/mt5-small's loss on a fixed input pair."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        """Check that the per-sequence score (-labels_len * loss) matches the recorded value."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Score is the (negative) total label log-likelihood proxy: -len(labels) * mean loss.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
70
1
'''simple docstring''' A__ : Union[str, Any] =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00] number //= 10_00_00 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution A__ : list[bool | None] =[None] * 10_00_00_00 A__ : Union[str, Any] =True A__ : int =False def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowerCAmelCase = chain(next_number(lowerCAmelCase ) ) _lowerCAmelCase = number_chain while number < 10_00_00_00: _lowerCAmelCase = number_chain number *= 10 return number_chain def UpperCamelCase__ ( lowerCAmelCase = 10_00_00_00 ): """simple docstring""" for i in range(1 , lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution() = }""")
70
"""SentencePiece-based tokenizer for the NLLB-200 translation models."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 10_24,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    """NLLB tokenizer backed by a SentencePiece model.

    Sequences are wrapped with language-code special tokens: by default
    ``[src_lang_code] tokens [eos]`` for source text; with
    ``legacy_behaviour=True`` the language code follows ``eos`` instead.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePiece processor is not picklable: serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap token ids with the current prefix/suffix (language code + eos)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline: encode inputs and force the target language token."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix to the source language setting.

        Default: prefix=[src_lang_code], suffix=[eos]; legacy: prefix=[], suffix=[eos, src_lang_code].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset prefix/suffix to the target language setting (same shape as source)."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
70
1
'''simple docstring''' A__ : Any =[4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__ : str =[3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__ : str ={ 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" assert len(str(lowerCAmelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: _lowerCAmelCase = year // 1_00 _lowerCAmelCase = (5 * (century % 4) + 2) % 7 _lowerCAmelCase = year % 1_00 _lowerCAmelCase = centurian % 12 _lowerCAmelCase = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 _lowerCAmelCase = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) _lowerCAmelCase = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
70
"""In-place selection sort."""


def selection_sort(collection):
    """Sort `collection` in ascending order in place and return it.

    >>> selection_sort([3, 1, 2])
    [1, 2, 3]
    >>> selection_sort([])
    []
    """
    length = len(collection)
    for i in range(length - 1):
        # Find the index of the smallest remaining element...
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        # ...and swap it into position i.
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
70
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCAmelCase ( snake_case_ ): _lowercase: Optional[int] = (UniPCMultistepScheduler,) _lowercase: List[str] = (('''num_inference_steps''', 25),) def lowercase__ ( self : Union[str, Any] , **__snake_case : Dict ) -> List[Any]: _lowerCAmelCase = { """num_train_timesteps""": 10_00, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**__snake_case ) return config def lowercase__ ( self : Dict , __snake_case : Any=0 , **__snake_case : Union[str, Any] ) -> List[str]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("""num_inference_steps""" , __snake_case ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**__snake_case ) _lowerCAmelCase = scheduler_class(**__snake_case ) scheduler.set_timesteps(__snake_case ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__snake_case ) _lowerCAmelCase = scheduler_class.from_pretrained(__snake_case ) new_scheduler.set_timesteps(__snake_case ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase = sample, sample for t in range(__snake_case , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample _lowerCAmelCase = new_scheduler.step(__snake_case , __snake_case , __snake_case , 
**__snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self : Optional[int] , __snake_case : List[Any]=0 , **__snake_case : Tuple ) -> Tuple: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("""num_inference_steps""" , __snake_case ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**__snake_case ) scheduler.set_timesteps(__snake_case ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__snake_case ) _lowerCAmelCase = scheduler_class.from_pretrained(__snake_case ) # copy over dummy past residuals new_scheduler.set_timesteps(__snake_case ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample _lowerCAmelCase = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self : Optional[Any] , __snake_case : Optional[Any]=None , **__snake_case : str ) -> Optional[Any]: if scheduler is None: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**__snake_case ) _lowerCAmelCase = scheduler_class(**__snake_case ) _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**__snake_case ) _lowerCAmelCase = scheduler_class(**__snake_case ) _lowerCAmelCase = 10 _lowerCAmelCase = 
self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__snake_case ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(__snake_case , __snake_case ) _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample return sample def lowercase__ ( self : Dict ) -> Tuple: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("""num_inference_steps""" , __snake_case ) for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**__snake_case ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__snake_case , """set_timesteps""" ): scheduler.set_timesteps(__snake_case ) elif num_inference_steps is not None and not hasattr(__snake_case , """set_timesteps""" ): _lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _lowerCAmelCase = scheduler.timesteps[5] _lowerCAmelCase = scheduler.timesteps[6] _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase__ ( self : int ) -> Any: # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = self.full_loop(scheduler=__snake_case ) _lowerCAmelCase = torch.mean(torch.abs(__snake_case ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) 
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = self.full_loop(scheduler=__snake_case ) _lowerCAmelCase = torch.mean(torch.abs(__snake_case ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def lowercase__ ( self : str ) -> Union[str, Any]: for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=__snake_case ) def lowercase__ ( self : Any ) -> Union[str, Any]: self.check_over_configs(thresholding=__snake_case ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__snake_case , prediction_type=__snake_case , sample_max_value=__snake_case , solver_order=__snake_case , solver_type=__snake_case , ) def lowercase__ ( self : Any ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__snake_case ) def lowercase__ ( self : Any ) -> Any: for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , ) _lowerCAmelCase = self.full_loop( solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , ) assert not torch.isnan(__snake_case ).any(), "Samples have nan numbers" def lowercase__ ( self : List[Any] ) -> str: self.check_over_configs(lower_order_final=__snake_case ) self.check_over_configs(lower_order_final=__snake_case ) def lowercase__ ( self : Optional[Any] ) -> Any: for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=__snake_case , time_step=0 ) def lowercase__ ( self : int ) -> Any: _lowerCAmelCase = self.full_loop() _lowerCAmelCase = 
torch.mean(torch.abs(__snake_case ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def lowercase__ ( self : Optional[Any] ) -> Dict: _lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" ) _lowerCAmelCase = torch.mean(torch.abs(__snake_case ) ) assert abs(result_mean.item() - 0.10_14 ) < 1E-3 def lowercase__ ( self : str ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(thresholding=__snake_case , dynamic_thresholding_ratio=0 ) _lowerCAmelCase = scheduler_class(**__snake_case ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__snake_case ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(__snake_case , __snake_case ) _lowerCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample assert sample.dtype == torch.floataa def lowercase__ ( self : Any , **__snake_case : List[Any] ) -> List[str]: for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**__snake_case ) _lowerCAmelCase = scheduler_class(**__snake_case ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
70
"""Image processor for video models: resizes, center-crops, rescales (with an
optional offset) and normalizes batches of video frames."""
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

A__ : List[str] = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce `videos` into a batch of videos (a list of lists of frames).

    Accepts a batch of videos, a single video (list/tuple of frames), or a
    single frame; raises ValueError for anything else.
    """
    # BUG FIX: the function was defined under a different name than its call
    # site in `preprocess`, and its parameter did not match the name used in
    # the body (`videos`), so calling it raised NameError.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class UpperCAmelCase(BaseImageProcessor):
    """Video image processor.

    NOTE(review): the original base class and method identifiers were mangled
    (`snake_case_`; several clashing `lowercase__` defs, of which only the last
    survived), so the class was non-functional. Names are restored from the
    internal call sites (`self.resize`, `self.center_crop`, `self.rescale`,
    `self.normalize`, `self._preprocess_image`, and the `self.do_resize`-style
    attribute reads in `preprocess`).
    """

    # presumably `model_input_names` on the real class — TODO confirm against base class
    _lowercase = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # assumes shortest-edge (non-square) resizing by default; the original
        # `default_to_square` argument token was mangled — TODO confirm
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        # BUG FIX: these defaults were computed into throwaway locals and never
        # stored on `self`; `preprocess` reads every one of these attributes.
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`; `size` holds either `shortest_edge` or both `height` and `width`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` (must contain `height` and `width`)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`, optionally subtracting `scale / 2` first
        (arithmetic preserved exactly as in the original)."""
        # float32 working precision; the original's dtype token (`np.floataa`)
        # was mangled — TODO confirm intended precision
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # BUG FIX: the original condition read
        # `do_resize and size is None or resample is None`, which (because `and`
        # binds tighter than `or`) raised whenever `resample` was None even when
        # `do_resize` was False. Parenthesized to match the error message.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into a `BatchFeature` of `pixel_values`."""
        # Per-call arguments override the instance-level defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
70
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A__ : Any =logging.get_logger(__name__) A__ : List[Any] ='''▁''' A__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model'''} A__ : Union[str, Any] ={ '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } A__ : Dict ={ '''facebook/nllb-200-distilled-600M''': 10_24, } # fmt: off A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', 
'''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCAmelCase ( snake_case_ ): _lowercase: int = VOCAB_FILES_NAMES 
_lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase: str = ['''input_ids''', '''attention_mask'''] _lowercase: List[int] = [] _lowercase: List[int] = [] def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase = legacy_behaviour super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , ) _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__snake_case ) ) _lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # 
Mimic fairseq token-to-id alignment for the first 4 token _lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowerCAmelCase = 1 _lowerCAmelCase = len(self.sp_model ) _lowerCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case ) } _lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} _lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _lowerCAmelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn""" _lowerCAmelCase = self.lang_code_to_id[self._src_lang] _lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ) -> List[str]: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None _lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase__ ( self : List[Any] ) -> Any: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase__ ( self : int ) -> str: return self._src_lang @src_lang.setter def lowercase__ ( self : Dict , __snake_case : str ) -> 
None: _lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) _lowerCAmelCase = [1] * len(self.prefix_tokens ) _lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__snake_case )) + suffix_ones return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _lowerCAmelCase = src_lang _lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case ) _lowerCAmelCase = self.convert_tokens_to_ids(__snake_case ) _lowerCAmelCase = tgt_lang_id return inputs def lowercase__ ( self : List[Any] ) 
-> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]: return self.sp_model.encode(__snake_case , out_type=__snake_case ) def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCAmelCase = self.sp_model.PieceToId(__snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str: _lowerCAmelCase = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip() return out_string def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__snake_case ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _lowerCAmelCase = os.path.join( __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __snake_case ) elif not os.path.isfile(self.vocab_file ): with open(__snake_case , """wb""" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__snake_case ) return (out_vocab_file,) def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : 
Optional[int] , ) -> BatchEncoding: _lowerCAmelCase = src_lang _lowerCAmelCase = tgt_lang return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case ) def lowercase__ ( self : str ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def lowercase__ ( self : Dict ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase__ ( self : str , __snake_case : int ) -> None: _lowerCAmelCase = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id] def lowercase__ ( self : Any , __snake_case : str ) -> None: _lowerCAmelCase = self.lang_code_to_id[lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id]
70
"""Processor that bundles an image processor and a tokenizer, plus a helper to
convert generated token sequences back into JSON-like structures."""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase(ProcessorMixin):
    """Wraps an `AutoImageProcessor` and an `AutoTokenizer`.

    NOTE(review): the base class and the `ProcessorMixin` class attributes were
    mangled (three clashing `_lowercase` names, of which only the last
    survived); restored to the standard `attributes` /
    `image_processor_class` / `tokenizer_class` contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,  # original warning-category token was mangled — TODO confirm
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is the deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        # BUG FIX: these two assignments were discarded into locals; `__call__`
        # and `as_target_processor` read them as instance attributes.
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward images to the image processor and/or text to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # BUG FIX: the original dropped this assignment into a local; the
            # tokenized text belongs on the returned encoding — stored under
            # "labels" per the usual processor convention (TODO confirm key)
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__` to the tokenizer for label processing."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string with `<s_key>...</s_key>` markers into
        a (possibly nested) dict / list structure.

        Name kept as `tokenajson` because that is the identifier the recursive
        call sites in this module use.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed key: drop the opening marker and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:
                        # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,  # original warning-category token was mangled — TODO confirm
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,  # original warning-category token was mangled — TODO confirm
        )
        return self.image_processor
70
1
"""Image-classification pipeline: assigns class labels to images."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

A__ : Dict = logging.get_logger(__name__)


# BUG FIX: the decorator argument and base class were mangled (`snake_case_`);
# `PIPELINE_INIT_ARGS` and `Pipeline` are imported above and used nowhere else.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCAmelCase(Pipeline):
    """Image-classification pipeline.

    NOTE(review): the four pipeline hooks were all defined under a single
    clashing name (`lowercase__`), so only the last survived and the class
    could not run; the hook names below follow the `Pipeline` contract
    (`_sanitize_parameters` / `preprocess` / `_forward` / `postprocess`).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        # Accept only image-classification model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        """Route the user-facing `top_k` kwarg to `postprocess`."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Classify the given image(s); accepts paths, URLs or PIL images."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """Load the image (path / URL / PIL) and convert it to framework tensors."""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits into the `top_k` best `{"score", "label"}` dicts."""
        # Never ask for more classes than the model has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        # BUG FIX: `idalabel` was a mangled reference to the standard
        # `PretrainedConfig.id2label` mapping.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
70
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _lowerCAmelCase = [] for num in range(len(lowerCAmelCase ) ): _lowerCAmelCase = 0 while 2 * i * i <= odd_composites[num]: _lowerCAmelCase = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase ) == n: return list_nums return [] def UpperCamelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
70
1
"""Tests for the Hugging Face hub file utilities (cached_file / get_file_from_repo / has_file).

NOTE(review): this file came through an identifier obfuscator. Every assignment
binds ``_lowerCAmelCase`` (the value is then unreachable), call sites pass the
undefined name ``__snake_case`` where repo ids / constants / exception classes
belong, and all test methods share the name ``lowercase__`` so only the last
definition survives on the class. Left byte-identical; comments only.
"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)

# Presumably RANDOM_BERT, CACHE_DIR and the full commit hash — all three were
# renamed to A__, so later reads of them are broken. TODO confirm originals.
A__ : Any ='''hf-internal-testing/tiny-random-bert'''
A__ : List[str] =os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
A__ : List[str] ='''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''


class UpperCAmelCase ( unittest.TestCase ):
    # NOTE(review): duplicate method names — only the final ``lowercase__``
    # below is actually bound on the class; the earlier ones are shadowed.
    def lowercase__ ( self : List[str] ) -> int:
        # Downloads a file and checks the cache layout (blobs/refs/snapshots).
        _lowerCAmelCase = cached_file(__snake_case , __snake_case )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__snake_case ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__snake_case , __snake_case ) ) )
        with open(os.path.join(__snake_case , """refs""" , """main""" ) ) as f:
            _lowerCAmelCase = f.read()
        self.assertEqual(__snake_case , os.path.join(__snake_case , """snapshots""" , __snake_case , __snake_case ) )
        self.assertTrue(os.path.isfile(__snake_case ) )
        # File is cached at the same place the second time.
        _lowerCAmelCase = cached_file(__snake_case , __snake_case )
        self.assertEqual(__snake_case , __snake_case )
        # Using a specific revision to test the full commit hash.
        _lowerCAmelCase = cached_file(__snake_case , __snake_case , revision="""9b8c223""" )
        self.assertEqual(__snake_case , os.path.join(__snake_case , """snapshots""" , __snake_case , __snake_case ) )

    def lowercase__ ( self : int ) -> Optional[int]:
        # Invalid repo id / revision / filename must raise with a clear message.
        with self.assertRaisesRegex(__snake_case , """is not a valid model identifier""" ):
            _lowerCAmelCase = cached_file("""tiny-random-bert""" , __snake_case )
        with self.assertRaisesRegex(__snake_case , """is not a valid git identifier""" ):
            _lowerCAmelCase = cached_file(__snake_case , __snake_case , revision="""aaaa""" )
        with self.assertRaisesRegex(__snake_case , """does not appear to have a file named""" ):
            _lowerCAmelCase = cached_file(__snake_case , """conf""" )

    def lowercase__ ( self : List[Any] ) -> List[str]:
        # Missing files: .no_exist sentinel, soft-fail flags, and offline/500 paths.
        with self.assertRaisesRegex(__snake_case , """does not appear to have a file named""" ):
            _lowerCAmelCase = cached_file(__snake_case , """conf""" )
        with open(os.path.join(__snake_case , """refs""" , """main""" ) ) as f:
            _lowerCAmelCase = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__snake_case , """.no_exist""" , __snake_case , """conf""" ) ) )
        _lowerCAmelCase = cached_file(__snake_case , """conf""" , _raise_exceptions_for_missing_entries=__snake_case )
        self.assertIsNone(__snake_case )
        _lowerCAmelCase = cached_file(__snake_case , """conf""" , local_files_only=__snake_case , _raise_exceptions_for_missing_entries=__snake_case )
        self.assertIsNone(__snake_case )
        # Fake response object for the mocked HEAD request (500 -> HTTPError).
        _lowerCAmelCase = mock.Mock()
        _lowerCAmelCase = 5_00
        _lowerCAmelCase = {}
        _lowerCAmelCase = HTTPError
        _lowerCAmelCase = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
            _lowerCAmelCase = cached_file(__snake_case , """conf""" , _raise_exceptions_for_connection_errors=__snake_case )
            self.assertIsNone(__snake_case )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowercase__ ( self : str ) -> Optional[int]:
        # has_file: the pt-only repo has torch weights but not TF/Flax ones.
        self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __snake_case ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __snake_case ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __snake_case ) )

    def lowercase__ ( self : Dict ) -> str:
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__snake_case , """is not a valid model identifier""" ):
            get_file_from_repo("""bert-base-case""" , __snake_case )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__snake_case , """is not a valid git identifier""" ):
            get_file_from_repo("""bert-base-cased""" , __snake_case , revision="""ahaha""" )
        _lowerCAmelCase = get_file_from_repo("""bert-base-cased""" , __snake_case )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCAmelCase = json.loads(open(__snake_case , """r""" ).read() )
        self.assertEqual(config["""hidden_size"""] , 7_68 )

    def lowercase__ ( self : str ) -> Optional[int]:
        # Local-directory lookup: touch a file, then ask for it (and a missing one).
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCAmelCase = Path(__snake_case ) / """a.txt"""
            filename.touch()
            self.assertEqual(get_file_from_repo(__snake_case , """a.txt""" ) , str(__snake_case ) )
            self.assertIsNone(get_file_from_repo(__snake_case , """b.txt""" ) )
70
"""Parse raw DPR training data into an evaluation-question file and a gold-titles file.

Reads `--src_path` (biencoder JSON), writes one question per line to
`--evaluation_set` and the tab-joined positive-context titles per line to
`--gold_data_path`. Names restored from the read sites of the
identifier-mangled dump (`parser`, `args`, `question`, `titles` …).
"""
import argparse
import json

from tqdm import tqdm


def main():
    """CLI entry point: parse arguments, convert the DPR records, write outputs."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # One gold line per question: tab-separated positive-context titles.
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(titles) + "\n")


if __name__ == "__main__":
    main()
70
1
'''simple docstring''' from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = [randint(-10_00 , 10_00 ) for i in range(10 )] _lowerCAmelCase = randint(-50_00 , 50_00 ) return (arr, r) A__ : Union[str, Any] =make_dataset() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for triplet in permutations(lowerCAmelCase , 3 ): if sum(lowerCAmelCase ) == target: return tuple(sorted(lowerCAmelCase ) ) return (0, 0, 0) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" arr.sort() _lowerCAmelCase = len(lowerCAmelCase ) for i in range(n - 1 ): _lowerCAmelCase , _lowerCAmelCase = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowerCAmelCase = """ triplet_sum1(*dataset) """ _lowerCAmelCase = """ triplet_sum2(*dataset) """ _lowerCAmelCase = repeat(setup=lowerCAmelCase , stmt=lowerCAmelCase , repeat=5 , number=1_00_00 ) _lowerCAmelCase = repeat(setup=lowerCAmelCase , stmt=lowerCAmelCase , repeat=5 , number=1_00_00 ) return (min(lowerCAmelCase ), min(lowerCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() A__ : List[Any] =solution_times() print(F"""The time for naive implementation is {times[0]}.""") print(F"""The time for optimized implementation is {times[1]}.""")
70
"""Lazy-import module for BigBird-Pegasus.

Restored from the identifier-mangled dump: the import structure and the torch
symbol list were both bound to ``A__`` (so ``_LazyModule`` read an undefined
``_import_structure``), and the lazy module must be installed into
``sys.modules`` rather than assigned to a throwaway name.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling symbols are simply absent without torch.
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
1
"""Pretokenize a dataset with a given tokenizer and push the result to the Hub.

Restored from the identifier-mangled dump: `output`, `parser`, `args`,
`tokenizer`, `ds` and the `tokenize` function name were all unbound at their
read sites.
"""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example's "content" and record its chars-per-token ratio."""
    output = {}
    # NOTE(review): the mangled source passed `truncation=<example>`; restored
    # to no truncation (pretokenization keeps full documents) — confirm.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # NOTE(review): key name for the ratio is not recoverable from the dump;
    # "ratio_char_token" follows the upstream script — confirm downstream use.
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
70
'''simple docstring''' import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=[] ): """simple docstring""" _lowerCAmelCase = size[0] - overlap_pixels * 2 _lowerCAmelCase = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels _lowerCAmelCase = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55 _lowerCAmelCase = np.pad(lowerCAmelCase , mode="""linear_ramp""" , pad_width=lowerCAmelCase , end_values=0 ) if "l" in remove_borders: _lowerCAmelCase = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: _lowerCAmelCase = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: _lowerCAmelCase = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: _lowerCAmelCase = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" return max(lowerCAmelCase , min(lowerCAmelCase , lowerCAmelCase ) ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = list(lowerCAmelCase ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += 
overlap _lowerCAmelCase = clamp_rect(lowerCAmelCase , [0, 0] , [image_size[0], image_size[1]] ) return rect def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(lowerCAmelCase , (original_slice, 0) ) return result def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) _lowerCAmelCase = tile.crop(lowerCAmelCase ) return tile def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = n % d return n - divisor class UpperCAmelCase ( snake_case_ ): def __init__( self : List[Any] , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : DDPMScheduler , __snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __snake_case : int = 3_50 , ) -> int: super().__init__( vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , low_res_scheduler=__snake_case , scheduler=__snake_case , max_noise_level=__snake_case , ) def lowercase__ ( self : List[Any] , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : str ) -> int: torch.manual_seed(0 ) _lowerCAmelCase = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) _lowerCAmelCase = add_overlap_rect(__snake_case , 
__snake_case , image.size ) _lowerCAmelCase = image.crop(__snake_case ) _lowerCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] _lowerCAmelCase = translated_slice_x - (original_image_slice / 2) _lowerCAmelCase = max(0 , __snake_case ) _lowerCAmelCase = squeeze_tile(__snake_case , __snake_case , __snake_case , __snake_case ) _lowerCAmelCase = to_input.size _lowerCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) _lowerCAmelCase = super(__snake_case , self ).__call__(image=__snake_case , **__snake_case ).images[0] _lowerCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) _lowerCAmelCase = unsqueeze_tile(__snake_case , __snake_case ) _lowerCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) _lowerCAmelCase = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) _lowerCAmelCase = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__snake_case ) , mode="""L""" , ) final_image.paste( __snake_case , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __snake_case ) @torch.no_grad() def __call__( self : Union[str, Any] , __snake_case : Union[str, List[str]] , __snake_case : Union[PIL.Image.Image, List[PIL.Image.Image]] , __snake_case : int = 75 , __snake_case : float = 9.0 , __snake_case : int = 50 , __snake_case : Optional[Union[str, List[str]]] = None , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case : int = 1 , __snake_case : int = 1_28 , __snake_case : int = 32 , 
__snake_case : int = 32 , ) -> str: _lowerCAmelCase = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) _lowerCAmelCase = math.ceil(image.size[0] / tile_size ) _lowerCAmelCase = math.ceil(image.size[1] / tile_size ) _lowerCAmelCase = tcx * tcy _lowerCAmelCase = 0 for y in range(__snake_case ): for x in range(__snake_case ): self._process_tile( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , prompt=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , noise_level=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase , revision="""fp16""" , torch_dtype=torch.floataa ) _lowerCAmelCase = pipe.to("""cuda""" ) _lowerCAmelCase = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(lowerCAmelCase ): print(f"progress: {obj['progress']:.4f}" ) obj["image"].save("""diffusers_library_progress.jpg""" ) _lowerCAmelCase = pipe(image=lowerCAmelCase , prompt="""Black font, white background, vector""" , noise_level=40 , callback=lowerCAmelCase ) final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
70
1
"""Conditional imports for the unCLIP pipelines.

Falls back to dummy placeholder objects when torch or a recent-enough
transformers (>= 4.25.0) is unavailable, so importing this package never
hard-fails on missing optional dependencies.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # unCLIP needs both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies raise an informative error only when actually instantiated.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
70
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: int = KandinskyVaaImgaImgPipeline _lowercase: List[str] = ['''image_embeds''', '''negative_image_embeds''', '''image'''] _lowercase: Optional[int] = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] _lowercase: Tuple = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _lowercase: List[str] = False @property def lowercase__ ( self : str ) -> List[str]: return 32 @property def lowercase__ ( self : Optional[int] ) -> List[Any]: return 32 @property def lowercase__ ( self : Tuple ) -> str: return self.time_input_dim @property def lowercase__ ( self : Any ) -> Optional[int]: return self.time_input_dim * 4 @property def lowercase__ ( self : int ) -> Optional[Any]: return 1_00 @property def lowercase__ ( self : int ) -> Dict: torch.manual_seed(0 ) _lowerCAmelCase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": 
(self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase = UNetaDConditionModel(**__snake_case ) return model @property def lowercase__ ( self : Union[str, Any] ) -> Tuple: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Dict ) -> str: torch.manual_seed(0 ) _lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Optional[int] ) -> Optional[int]: _lowerCAmelCase = self.dummy_unet _lowerCAmelCase = self.dummy_movq _lowerCAmelCase = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _lowerCAmelCase = DDIMScheduler(**__snake_case ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : int , __snake_case : List[str] , __snake_case : List[Any]=0 ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __snake_case ) # create init_image _lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) 
).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) _lowerCAmelCase = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) ) _lowerCAmelCase = output.images _lowerCAmelCase = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Any ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : int ) -> Dict: 
_lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase = """A red cartoon frog, 4k""" _lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) _lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) _lowerCAmelCase = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase = pipe_prior( __snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase = pipeline( image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) _lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
70
1
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants A__ : List[Any] =Mapping[str, np.ndarray] A__ : Dict =Mapping[str, Any] # Is a nested dict. A__ : Optional[Any] =0.01 @dataclasses.dataclass(frozen=snake_case_ ) class UpperCAmelCase : _lowercase: np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. _lowercase: np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. _lowercase: np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. _lowercase: np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. _lowercase: np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions _lowercase: Optional[np.ndarray] = None # Optional remark about the protein. 
Included as a comment in output PDB # files _lowercase: Optional[str] = None # Templates used to generate this protein (prediction-only) _lowercase: Optional[Sequence[str]] = None # Chain corresponding to each parent _lowercase: Optional[Sequence[int]] = None def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = r"""(\[[A-Z]+\]\n)""" _lowerCAmelCase = [tag.strip() for tag in re.split(lowerCAmelCase , lowerCAmelCase ) if len(lowerCAmelCase ) > 0] _lowerCAmelCase = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] ) _lowerCAmelCase = ["N", "CA", "C"] _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None for g in groups: if "[PRIMARY]" == g[0]: _lowerCAmelCase = g[1][0].strip() for i in range(len(lowerCAmelCase ) ): if seq[i] not in residue_constants.restypes: _lowerCAmelCase = """X""" # FIXME: strings are immutable _lowerCAmelCase = np.array( [residue_constants.restype_order.get(lowerCAmelCase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: _lowerCAmelCase = [] for axis in range(3 ): tertiary.append(list(map(lowerCAmelCase , g[1][axis].split() ) ) ) _lowerCAmelCase = np.array(lowerCAmelCase ) _lowerCAmelCase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(lowerCAmelCase ): _lowerCAmelCase = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: _lowerCAmelCase = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) ) _lowerCAmelCase = np.zeros( ( len(lowerCAmelCase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(lowerCAmelCase ): _lowerCAmelCase = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=lowerCAmelCase , atom_mask=lowerCAmelCase , aatype=lowerCAmelCase , residue_index=np.arange(len(lowerCAmelCase ) ) , b_factors=lowerCAmelCase , ) def UpperCamelCase__ ( lowerCAmelCase , 
lowerCAmelCase = 0 ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) _lowerCAmelCase = prot.parents _lowerCAmelCase = prot.parents_chain_index if parents is not None and parents_chain_index is not None: _lowerCAmelCase = [p for i, p in zip(lowerCAmelCase , lowerCAmelCase ) if i == chain_id] if parents is None or len(lowerCAmelCase ) == 0: _lowerCAmelCase = ["""N/A"""] pdb_headers.append(f"PARENT {' '.join(lowerCAmelCase )}" ) return pdb_headers def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = pdb_str.split("""\n""" ) _lowerCAmelCase = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) _lowerCAmelCase = 42 if prot.parents is not None and len(prot.parents ) > 0: _lowerCAmelCase = [] if prot.parents_chain_index is not None: _lowerCAmelCase = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(lowerCAmelCase ) , [] ) parent_dict[str(lowerCAmelCase )].append(lowerCAmelCase ) _lowerCAmelCase = max([int(lowerCAmelCase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): _lowerCAmelCase = parent_dict.get(str(lowerCAmelCase ) , ["""N/A"""] ) parents_per_chain.append(lowerCAmelCase ) else: parents_per_chain.append(list(prot.parents ) ) else: _lowerCAmelCase = [["""N/A"""]] def make_parent_line(lowerCAmelCase ) -> str: return f"PARENT {' '.join(lowerCAmelCase )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) _lowerCAmelCase = 0 for i, l in enumerate(lowerCAmelCase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(lowerCAmelCase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(lowerCAmelCase ): _lowerCAmelCase = parents_per_chain[chain_counter] else: _lowerCAmelCase = ["""N/A"""] out_pdb_lines.append(make_parent_line(lowerCAmelCase ) ) return "\n".join(lowerCAmelCase 
) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = residue_constants.restypes + ["""X"""] def res_atoa(lowerCAmelCase ) -> str: return residue_constants.restype_atoa.get(restypes[r] , """UNK""" ) _lowerCAmelCase = residue_constants.atom_types _lowerCAmelCase = [] _lowerCAmelCase = prot.atom_mask _lowerCAmelCase = prot.aatype _lowerCAmelCase = prot.atom_positions _lowerCAmelCase = prot.residue_index.astype(np.intaa ) _lowerCAmelCase = prot.b_factors _lowerCAmelCase = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("""Invalid aatypes.""" ) _lowerCAmelCase = get_pdb_headers(lowerCAmelCase ) if len(lowerCAmelCase ) > 0: pdb_lines.extend(lowerCAmelCase ) _lowerCAmelCase = aatype.shape[0] _lowerCAmelCase = 1 _lowerCAmelCase = 0 _lowerCAmelCase = string.ascii_uppercase _lowerCAmelCase = None # Add all atom sites. for i in range(lowerCAmelCase ): _lowerCAmelCase = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(lowerCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue _lowerCAmelCase = """ATOM""" _lowerCAmelCase = atom_name if len(lowerCAmelCase ) == 4 else f" {atom_name}" _lowerCAmelCase = """""" _lowerCAmelCase = """""" _lowerCAmelCase = 1.00 _lowerCAmelCase = atom_name[0] # Protein supports only C, N, O, S, this works. _lowerCAmelCase = """""" _lowerCAmelCase = """A""" if chain_index is not None: _lowerCAmelCase = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
_lowerCAmelCase = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(lowerCAmelCase ) atom_index += 1 _lowerCAmelCase = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: _lowerCAmelCase = True _lowerCAmelCase = chain_index[i + 1] if should_terminate: # Close the chain. _lowerCAmelCase = """TER""" _lowerCAmelCase = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(lowerCAmelCase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(lowerCAmelCase , lowerCAmelCase ) ) pdb_lines.append("""END""" ) pdb_lines.append("""""" ) return "\n".join(lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ): """simple docstring""" return Protein( aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=lowerCAmelCase , remark=lowerCAmelCase , parents=lowerCAmelCase , parents_chain_index=lowerCAmelCase , )
70
"""Tests for the PyTorch SegFormer model (config checks, model tests, integration tests).

NOTE(review): this file was obfuscated — all classes were named `UpperCAmelCase`,
all test methods `lowercase__` (so they shadowed each other and unittest never
discovered them), and all parameters shared one name (a SyntaxError). Names are
restored from the call sites that survived the mangling
(`SegformerModelTester(self)`, `SegformerConfigTester(...)`, `prepare_img()`).
"""
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    # Base class was obfuscated to `snake_case_`; `ConfigTester` restored per
    # the imports above — confirm against upstream.
    def create_and_check_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    """Builds tiny SegFormer configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        # Binary segmentation: a single label with {0} targets must still yield
        # a positive loss.
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): these four flags were all obfuscated to `_lowercase`
    # (values True, False, False, False); names restored per upstream — confirm.
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
70
1
"""Tests for the Flax ALBERT models.

NOTE(review): the file was obfuscated (classes `UpperCAmelCase`, methods
`lowercase__`, parameters all named `__snake_case` — a SyntaxError). Names are
restored from the surviving call site `FlaxAlbertModelTester(self)` and the
standard HF test layout.
"""
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    """Builds a tiny ALBERT config plus random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            # NOTE(review): FlaxAlbertForQuestionAnswering appears twice in the
            # original tuple; kept for behavior parity.
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
70
"""Translation feature types for the `datasets` library.

NOTE(review): the two dataclasses were both obfuscated to `UpperCAmelCase` with
every field named `_lowercase` (duplicate fields — a runtime error) and
`init=snake_case_` (undefined name). Field/method names are restored from the
`self.languages`/`self.num_languages` references that survived the mangling.
"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example.

    Attributes:
        languages: language codes; each example must provide a string per language.
        id: optional feature identifier.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string column per language, in sorted order.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a per-language `Value` dict."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example.

    Attributes:
        languages: optional closed set of permitted language codes; when given,
            examples using other codes are rejected.
        num_languages: derived in `__post_init__`; do not set manually.
        id: optional feature identifier.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated list; None means "any language".
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into `Sequence`-of-`Value` columns."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
70
1
"""Tests for the XLM-RoBERTa tokenizers (slow and fast).

NOTE(review): the file was obfuscated — the class was `UpperCAmelCase`, every
test method `lowercase__` (shadowing each other, invisible to unittest), and
parameters/locals collapsed to `__snake_case`/`_lowerCAmelCase`. Names are
restored per the standard HF tokenizer-test layout. Long padded id/mask lists
are expressed as ``prefix + [pad] * n`` with counts verified against the
original literals (value-identical).
"""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                      ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # Padding runs below replace the original explicit literals; the run
        # lengths (68, 91; masks 106/38+68/15+91) were verified by count.
        expected_encoding = {
            "input_ids": [
                [0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198,
                 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15,
                 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18,
                 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658,
                 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876,
                 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919,
                 17336, 5, 2],
                [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51,
                 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756,
                 135355, 7, 5, 2] + [1] * 68,
                [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2] + [1] * 91,
            ],
            "attention_mask": [
                [1] * 106,
                [1] * 38 + [0] * 68,
                [1] * 15 + [0] * 91,
            ],
        }  # noqa: E501

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
70
"""Fast (Rust-backed) RoFormer tokenizer.

RoFormer uses a BERT-style WordPiece vocabulary, but its pre-tokenizer is
swapped for a jieba-based one so Chinese text is segmented into words before
WordPiece runs.  Because the custom pre-tokenizer is a Python object, it is
detached before pickling / saving and re-attached afterwards.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

# NOTE(review): the dump assigned every module constant to the same throwaway
# name (`A__`), leaving the names the class references undefined.  They are
# restored here under the names the class actually uses.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase(PreTrainedTokenizerFast):
    """RoFormer fast tokenizer built on HuggingFace `tokenizers`.

    The base class (`PreTrainedTokenizerFast`) drives serialization and
    special-token handling through the attribute / method names below, so the
    mangled duplicate names (`_lowercase`, `lowercase__`) are restored to the
    names the framework dispatches on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # Custom (Python-object) pre-tokenizers cannot be pickled; swap in a
        # plain BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Re-attach the jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocab; returns the written file path(s)."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom jieba pre-tokenizer is not serializable; temporarily use
        # the stock BERT pre-tokenizer while the base class writes files.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
70
1
"""Lazy import structure for the LongT5 model family.

Fixes over the mangled dump: the import map was assigned to throwaway names
instead of `_import_structure` (so `_LazyModule` raised NameError), the lazy
module was never installed into `sys.modules`, and the TYPE_CHECKING branch
imported nonexistent `LongTa*` names from a nonexistent module.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Base import map: the config is always importable.
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

# PyTorch models are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

# Flax models are only advertised when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Optional[int] = StableDiffusionControlNetImgaImgPipeline _lowercase: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} _lowercase: str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowercase: Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) _lowercase: Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self : List[str] ) -> List[str]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , 
layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _lowerCAmelCase = CLIPTextModel(__snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowercase__ ( self : Any , __snake_case : str , __snake_case : Any=0 ) -> str: if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = 2 _lowerCAmelCase = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ) _lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" 
).resize((64, 64) ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowercase__ ( self : Optional[int] ) -> List[Any]: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Tuple ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowercase__ ( self : Tuple ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = StableDiffusionControlNetImgaImgPipeline _lowercase: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} _lowercase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowercase: Any = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__snake_case : Optional[Any] ): if isinstance(__snake_case , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , 
conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _lowerCAmelCase = CLIPTextModel(__snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] ) _lowerCAmelCase = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowercase__ ( self : Tuple , __snake_case : int , __snake_case : List[str]=0 ) -> Union[str, Any]: if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = 2 _lowerCAmelCase = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), ] _lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((64, 64) ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowercase__ ( self : List[str] ) -> Dict: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) _lowerCAmelCase = 10.0 _lowerCAmelCase = 4 _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def 
lowercase__ ( self : int ) -> str: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Optional[Any] ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowercase__ ( self : int ) -> str: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Union[str, Any] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Any: _lowerCAmelCase = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) _lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__snake_case , controlnet=__snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase = """evil space-punk bird""" _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_12, 5_12) ) _lowerCAmelCase = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_12, 5_12) ) _lowerCAmelCase = pipe( __snake_case , __snake_case , 
control_image=__snake_case , generator=__snake_case , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) _lowerCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
70
1
"""Tests for the Pix2Struct image processor.

Fixes over the mangled dump: both test classes were named `UpperCAmelCase`
(the second silently erased the first), the tester was referenced as
`PixaStructImageProcessingTester` but never defined under that name, every
test method was named `lowercase__` (unittest discovery only runs `test_*`
methods, so nothing would ever run), all `self.*` bindings in the tester's
`__init__` were mangled to a single throwaway local, and the torch-version
flag fallback was assigned to a throwaway name instead of
`is_torch_greater_or_equal_than_1_11`.
"""
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    """Holds the knobs shared by the image-processor tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Fetch a fixed reference image (network access required)."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Reference mean of the flattened patches for the fixed dummy image.
        self.assertTrue(
            torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3)
        )

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch = patch pixels * channels, plus 2 positional slots.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # VQA mode requires a header text; calling without one must raise.
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        # RGBA input is converted to RGB, so the encoded output has 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # num_channels - 1 because the alpha channel is dropped by RGB conversion.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
70
"""Convert original SwiftFormer checkpoints to the HuggingFace Transformers format.

Loads an original checkpoint (local path or URL), renames its weights to the
HF naming scheme, verifies the logits against reference values on a COCO test
image, and saves the converted model.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# conversion is done entirely on CPU
device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for the given checkpoint name."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Build (old_key, new_key) pairs mapping original weight names to HF names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # numbered entries are blocks inside a stage
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy/paste/tweak the original checkpoint into the HF format and save it.

    Args:
        swiftformer_name: one of ``swiftformer_xs``/``_s``/``_l1``/``_l3``.
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: local path or https URL of the original checkpoint.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    # NOTE(review): "preprocessor_config" looks like a local path, not a Hub id — confirm it exists
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
70
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : Any ) -> Tuple: _lowerCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__snake_case , """embed_dim""" ) ) self.parent.assertTrue(hasattr(__snake_case , """num_heads""" ) ) class UpperCAmelCase : def __init__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[Any]=13 , __snake_case : Optional[int]=64 , __snake_case : List[Any]=3 , __snake_case : List[str]=[16, 48, 96] , __snake_case : Optional[int]=[1, 3, 6] , __snake_case : str=[1, 2, 10] , __snake_case : int=[7, 3, 3] , __snake_case : Union[str, Any]=[4, 2, 2] , __snake_case : Union[str, Any]=[2, 1, 1] , __snake_case : Dict=[2, 2, 2] , __snake_case : Dict=[False, False, True] , __snake_case : List[Any]=[0.0, 0.0, 0.0] , __snake_case : int=0.02 , __snake_case : Union[str, Any]=1E-1_2 , __snake_case : Dict=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=2 , ) -> Union[str, Any]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = patch_sizes _lowerCAmelCase = patch_stride _lowerCAmelCase = patch_padding _lowerCAmelCase = is_training 
_lowerCAmelCase = use_labels _lowerCAmelCase = num_labels _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = num_heads _lowerCAmelCase = stride_kv _lowerCAmelCase = depth _lowerCAmelCase = cls_token _lowerCAmelCase = attention_drop_rate _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps def lowercase__ ( self : Dict ) -> List[Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: # create a random int32 tensor of given shape _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Optional[int] ) -> Dict: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowercase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] ) -> Tuple: _lowerCAmelCase = TFCvtModel(config=__snake_case ) _lowerCAmelCase = model(__snake_case , training=__snake_case ) _lowerCAmelCase = (self.image_size, self.image_size) _lowerCAmelCase , _lowerCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowercase__ ( self : Optional[int] , __snake_case : Dict , 
__snake_case : Any , __snake_case : Optional[int] ) -> Optional[int]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = TFCvtForImageClassification(__snake_case ) _lowerCAmelCase = model(__snake_case , labels=__snake_case , training=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : List[Any] ) -> Optional[Any]: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: List[Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () _lowercase: Any = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) _lowercase: Union[str, Any] = False _lowercase: Dict = False _lowercase: Optional[int] = False _lowercase: Optional[Any] = False _lowercase: Optional[Any] = False def lowercase__ ( self : Any ) -> List[str]: _lowerCAmelCase = TFCvtModelTester(self ) _lowerCAmelCase = TFCvtConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def lowercase__ ( self : Dict ) -> int: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def lowercase__ ( self : Union[str, Any] ) -> List[Any]: pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def lowercase__ ( self : str ) -> List[str]: pass 
@unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def lowercase__ ( self : int ) -> List[str]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) def lowercase__ ( self : List[Any] ) -> Any: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def lowercase__ ( self : Any ) -> List[Any]: super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def lowercase__ ( self : Dict ) -> str: _lowerCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(__snake_case ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def lowercase__ ( self : Any ) -> Dict: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) def lowercase__ ( self : int ) -> Dict: def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Dict ): _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first hidden states (first block) self.assertListEqual( 
list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def lowercase__ ( self : Union[str, Any] ) -> List[str]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase__ ( self : Any ) -> Any: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case ) @slow def lowercase__ ( self : List[str] ) -> Union[str, Any]: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = TFCvtModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self : Dict ) -> List[Any]: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""tf""" ) # forward pass _lowerCAmelCase = model(**__snake_case ) # verify the logits _lowerCAmelCase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , 
__snake_case ) _lowerCAmelCase = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __snake_case , atol=1E-4 ) )
70
"""Integration tests for the `datasets` inspection API (inspect_*/get_dataset_*)."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# every test in this module talks to the Hub
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    # copying the loading script into tmp_path must not leak __pycache__
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # a multi-config dataset requires an explicit config_name
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
70
1
"""`datasets-cli convert`: rewrite a TensorFlow Datasets script into a HF Datasets script."""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

# expressions that cannot be converted automatically and need a manual pass
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used by the CLI to build the command from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `convert` subcommand and its arguments to the CLI parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under --tfds_path into --datasets_directory."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # wrap the untranslatable line in conflict markers for a human to resolve
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
70
"""Map an activation-function name to a fresh ``torch.nn`` module instance."""
from torch import nn


def UpperCamelCase__(act_fn):
    """Return the ``nn.Module`` implementing the named activation.

    Args:
        act_fn: One of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``
            (case-sensitive). ``"swish"`` and ``"silu"`` are aliases.

    Returns:
        A newly constructed activation module.

    Raises:
        ValueError: If ``act_fn`` is not a supported name.
    """
    # BUG FIX: the parameter was previously named `lowerCAmelCase` while the
    # body read `act_fn`, so every call raised NameError.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
70
1
"""`transformers-cli env`: print environment info for bug reports."""
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `env` subcommand to the CLI parser."""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        """Collect version/availability info for all optional backends and print it."""
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render the info dict as a markdown-ish bullet list."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
70
"""Top-level `datasets` package init: version/compat checks, public API re-exports."""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


# hard requirements checked at import time
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# patch legacy attribute locations for backward compatibility
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available A__ : str ={'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys A__ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : Tuple ={ '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int =['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =[ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
# NOTE(review): flattened copy of a VideoMAE model test module in the
# `transformers` test-suite style (config/model testers plus slow
# integration tests hitting the "MCG-NJU/videomae-base*" checkpoints).
# Identifiers appear machine-obfuscated (`UpperCAmelCase`, `lowercase__`,
# `_lowerCAmelCase`), and multiple methods/classes share the same name, so
# this blob is not runnable as-is — restore distinct names from the
# upstream file before executing. Content preserved byte-for-byte below.
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase : def __init__( self : Tuple , __snake_case : Optional[int] , __snake_case : Dict=13 , __snake_case : str=10 , __snake_case : List[Any]=3 , __snake_case : Dict=2 , __snake_case : List[str]=2 , __snake_case : List[Any]=2 , __snake_case : str=True , __snake_case : List[Any]=True , __snake_case : str=32 , __snake_case : List[Any]=5 , __snake_case : List[Any]=4 , __snake_case : Any=37 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Tuple=0.1 , __snake_case : Any=10 , __snake_case : Any=0.02 , __snake_case : List[Any]=0.9 , __snake_case : Union[str, Any]=None , ) -> Optional[int]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = patch_size _lowerCAmelCase = tubelet_size _lowerCAmelCase = num_frames _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size 
_lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = mask_ratio _lowerCAmelCase = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame _lowerCAmelCase = (image_size // patch_size) ** 2 _lowerCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos _lowerCAmelCase = int(mask_ratio * self.seq_length ) def lowercase__ ( self : Optional[Any] ) -> str: _lowerCAmelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : List[str] ) -> Optional[int]: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def lowercase__ ( self : Optional[int] , __snake_case : str , __snake_case : List[str] , __snake_case : int ) -> Optional[Any]: _lowerCAmelCase = VideoMAEModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[int] , __snake_case : str , __snake_case : int , 
__snake_case : List[str] ) -> Optional[Any]: _lowerCAmelCase = VideoMAEForPreTraining(__snake_case ) model.to(__snake_case ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _lowerCAmelCase = torch.ones((self.num_masks,) ) _lowerCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) _lowerCAmelCase = mask.expand(self.batch_size , -1 ).bool() _lowerCAmelCase = model(__snake_case , __snake_case ) # model only returns predictions for masked patches _lowerCAmelCase = mask.sum().item() _lowerCAmelCase = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def lowercase__ ( self : List[Any] ) -> Dict: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: str = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) _lowercase: List[str] = ( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) _lowercase: str = False _lowercase: List[str] = False _lowercase: List[str] = False _lowercase: int = False def lowercase__ ( self : List[str] ) -> str: _lowerCAmelCase = VideoMAEModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int]=False ) -> List[Any]: _lowerCAmelCase = copy.deepcopy(__snake_case ) if model_class == VideoMAEForPreTraining: # important: each video needs 
to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _lowerCAmelCase = torch.ones((self.model_tester.num_masks,) ) _lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) _lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool() _lowerCAmelCase = bool_masked_pos.to(__snake_case ) if return_labels: if model_class in [ *get_values(__snake_case ), ]: _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case ) return inputs_dict def lowercase__ ( self : Tuple ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" ) def lowercase__ ( self : List[Any] ) -> str: pass def lowercase__ ( self : Dict ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def lowercase__ ( self : Optional[Any] ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) def lowercase__ ( self : str ) -> Any: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase__ ( self : Optional[Any] ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_pretraining(*__snake_case ) @slow def lowercase__ ( self : int ) -> Any: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = VideoMAEModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def lowercase__ ( self : List[str] ) -> Tuple: if not self.has_attentions: pass else: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: _lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks _lowerCAmelCase = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) _lowerCAmelCase = len(__snake_case ) # Check attention is always last and order is fine _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) self.assertEqual(out_len + 1 , len(__snake_case ) ) _lowerCAmelCase 
= outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowercase__ ( self : int ) -> List[str]: def check_hidden_states_output(__snake_case : List[str] , __snake_case : str , __snake_case : List[Any] ): _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__snake_case ) , __snake_case ) _lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks _lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : List[Any] ) -> str: pass def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) _lowerCAmelCase = np.load(lowerCAmelCase ) return list(lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: # 
logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Dict ) -> str: _lowerCAmelCase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to( __snake_case ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_video() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) # forward pass with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) # verify the logits _lowerCAmelCase = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __snake_case ) _lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) ) @slow def lowercase__ ( self : List[str] ) -> Optional[int]: _lowerCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__snake_case ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_video() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) # add boolean mask, indicating which patches to mask _lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) _lowerCAmelCase = torch.load(__snake_case ) # forward pass with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) # verify the logits _lowerCAmelCase = torch.Size([1, 14_08, 15_36] ) _lowerCAmelCase = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=__snake_case ) self.assertEqual(outputs.logits.shape , __snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) _lowerCAmelCase = 
torch.tensor([0.51_42] , device=__snake_case ) self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) _lowerCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__snake_case ).to( __snake_case ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) _lowerCAmelCase = torch.tensor(torch.tensor([0.64_69] ) , device=__snake_case ) self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1E-4 ) )
70
"""Compute binomial coefficients via a Pascal's-triangle row update.

The original flattened block was uncompilable (both parameters were named
``lowerCAmelCase`` and every local was assigned to ``_lowerCAmelCase`` while
``c`` and ``j`` were read), and the demo call referenced an undefined name
``binomial_coefficient``.  Both defects are fixed here.
"""


def UpperCamelCase__(n, r):
    """Return the binomial coefficient C(n, r) ("n choose r").

    Builds Pascal's triangle one row at a time, keeping only a single row of
    length r + 1, so the computation runs in O(n * r) time and O(r) space and
    uses exact integer arithmetic throughout.

    Args:
        n: total number of items (non-negative int).
        r: number of items chosen (non-negative int).

    Returns:
        C(n, r) as an int; 0 when r > n (the untouched c[r] slot stays 0).

    Raises:
        ValueError: if n or r is negative.
    """
    if n < 0 or r < 0:
        raise ValueError("n and r must be non-negative integers")
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) == 1 for every row i
    for i in range(1, n + 1):
        # Walk right-to-left so c[j - 1] still holds the *previous* row's
        # value when it is added into c[j].
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


if __name__ == "__main__":
    # Guarded demo; the original module-level print called the undefined
    # name `binomial_coefficient`.
    print(UpperCamelCase__(10, 5))
70
1
# NOTE(review): flattened unit tests for a project-local
# `minhash_deduplication` module (MinHash near-duplicate clustering over a
# tiny 3-row `datasets.Dataset` fixture). The test class inherits from an
# undefined name `snake_case_` (presumably `TestCase` — verify upstream) and
# both test methods collide on the obfuscated name `lowercase__`, so this
# blob is not runnable as-is. Content preserved byte-for-byte below.
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = { """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } _lowerCAmelCase = Dataset.from_dict(lowerCAmelCase ) return dataset class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : str ) -> Any: _lowerCAmelCase = get_dataset() _lowerCAmelCase = make_duplicate_clusters(__snake_case , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def lowercase__ ( self : str ) -> List[Any]: _lowerCAmelCase = get_dataset() _lowerCAmelCase , _lowerCAmelCase = deduplicate_dataset(__snake_case ) self.assertEqual(len(__snake_case ) , 2 ) print(__snake_case ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __snake_case )
70
# NOTE(review): flattened copy of the `datasets` packaged Pandas builder —
# a `BuilderConfig` dataclass with an optional `features` schema plus an
# `ArrowBasedBuilder` that reads pickled pandas DataFrames and yields Arrow
# tables (casting to `config.features` when provided). Method names are
# obfuscated to the colliding `lowercase__` (upstream: `_info`,
# `_split_generators`, `_cast_table`, `_generate_tables` — verify), so this
# blob is not runnable as-is. Content preserved byte-for-byte below.
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class UpperCAmelCase ( datasets.BuilderConfig ): _lowercase: Optional[datasets.Features] = None class UpperCAmelCase ( datasets.ArrowBasedBuilder ): _lowercase: Tuple = PandasConfig def lowercase__ ( self : Optional[Any] ) -> str: return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self : List[str] , __snake_case : Dict ) -> int: if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__snake_case , (str, list, tuple) ): _lowerCAmelCase = data_files if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] _lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files] splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) ) return splits def lowercase__ ( self : List[Any] , __snake_case : pa.Table ) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema ) return pa_table def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> Any: for i, file 
in enumerate(itertools.chain.from_iterable(__snake_case ) ): with open(__snake_case , """rb""" ) as f: _lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(__snake_case ) ) yield i, self._cast_table(__snake_case )
70
1
"""Project Euler problem 63: count n-digit integers that are also nth powers.

The original flattened block was uncompilable (both parameters shared the
name ``lowerCAmelCase``, the locals ``bases``/``powers`` were read but never
bound, and the final print referenced an undefined ``solution``); this is
the repaired, equivalent implementation.
"""


def UpperCamelCase__(max_base=10, max_power=22):
    """Count pairs (base, power) where base**power has exactly `power` digits.

    Checks every base in [1, max_base) raised to every power in
    [1, max_power).  With the defaults this answers Euler 63 (result: 49) —
    bases >= 10 can never qualify because 10**n already has n + 1 digits.

    Args:
        max_base: exclusive upper bound on the base (default 10).
        max_power: exclusive upper bound on the exponent (default 22).

    Returns:
        The number of qualifying (base, power) combinations.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{UpperCamelCase__(10, 22) = }")
70
"""Prim's minimum-spanning-tree algorithm, list-based and heap-based.

Repaired reconstruction of the flattened original: the vertex methods both
carried the colliding name ``lowercase__`` and the module functions all
collided on ``UpperCamelCase__`` with duplicate parameter names, so the blob
could not run.  The real method names (`add_neighbor`, `add_edge`) are taken
from the call sites inside ``connect`` in the same blob.
"""
import heapq as hq
import math
from collections.abc import Iterator


class UpperCAmelCase:
    """A graph vertex identified by a string id.

    Attributes:
        id: string identifier of the vertex.
        key: current best edge weight to the growing MST (set by prim/prim_heap).
        pi: predecessor vertex in the MST (set by prim/prim_heap).
        neighbors: list of adjacent UpperCAmelCase vertices.
        edges: mapping {neighbor_id: edge_weight}.
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex_id: weight}

    def __lt__(self, other):
        # Ordering by `key` lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record `vertex` as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge from this vertex to `vertex`."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected edge of weight `edge` between vertices a and b (1-indexed)."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Prim's algorithm with a linear-scan frontier (O(V^2)).

    Returns a list of MST edges as 1-indexed (vertex, predecessor) pairs.
    Mutates each vertex's `key` and `pi` attributes.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)  # cheapest vertex still outside the tree
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap frontier.

    Yields MST edges as 1-indexed (vertex, predecessor) pairs.  The heap is
    re-heapified after each key decrease, mirroring the original code.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)  # restore heap order after decreasing v.key
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector():
    """Placeholder kept from the original module (its body was empty)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
1
"""A treap (randomized balanced BST): split / merge / insert / erase.

Repaired reconstruction of the flattened original, in which every function
collided on the name ``UpperCamelCase__`` and the bodies referenced the real
names (`split`, `merge`, `insert`, `erase`, `inorder`, `interact_treap`),
making the blob unrunnable.  The node class keeps its original name
``UpperCAmelCase``; ``Node`` did not exist in the blob.
"""
from random import random


class UpperCAmelCase:
    """Treap node: BST-ordered by `value`, heap-ordered by random `prior`."""

    def __init__(self, value=None):
        self.value = value
        self.prior = random()  # heap priority drawn uniformly from [0, 1)
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root, value):
    """Split `root` into (keys <= value, keys > value); returns (left, right)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belong to the "greater" half.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root (and its left subtree) belong to the "less-or-equal" half.
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right):
    """Merge two treaps where every key in `left` <= every key in `right`."""
    if (not left) or (not right):  # if one side is empty, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value):
    """Insert `value` into the treap rooted at `root`; returns the new root."""
    node = UpperCAmelCase(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value):
    """Remove every node holding `value`; returns the new root."""
    left, right = split(root, value - 1)  # left: keys < value
    _, right = split(right, value)  # discard the middle (== value) part
    return merge(left, right)


def inorder(root):
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root, args):
    """Apply whitespace-separated commands ('+N' insert, '-N' erase) to `root`."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main():
    """Interactive REPL over a single treap; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
70
# NOTE(review): flattened slow integration test for google/mt5-small — it
# downloads the checkpoint, scores "Hi I am" given "Hello there", and
# compares the summed token log-likelihood (-(labels.shape[-1] * loss))
# against the hard-coded reference -84.9127 within 1e-4. Requires network
# access and the `transformers`/`sentencepiece` stack; identifiers are
# machine-obfuscated. Content preserved byte-for-byte below.
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : List[Any] ) -> str: _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__snake_case ).to(__snake_case ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" ) _lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids _lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids _lowerCAmelCase = model(input_ids.to(__snake_case ) , labels=labels.to(__snake_case ) ).loss _lowerCAmelCase = -(labels.shape[-1] * loss.item()) _lowerCAmelCase = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
70
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Tuple ={'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =[ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys A__ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A__ : Any =logging.get_logger(__name__) A__ : List[Any] ='''▁''' A__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model'''} A__ : Union[str, Any] ={ '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } A__ : Dict ={ '''facebook/nllb-200-distilled-600M''': 10_24, } # fmt: off A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', 
'''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCAmelCase ( snake_case_ ): _lowercase: int = VOCAB_FILES_NAMES 
_lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase: str = ['''input_ids''', '''attention_mask'''] _lowercase: List[int] = [] _lowercase: List[int] = [] def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase = legacy_behaviour super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , ) _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__snake_case ) ) _lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # 
Mimic fairseq token-to-id alignment for the first 4 token _lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowerCAmelCase = 1 _lowerCAmelCase = len(self.sp_model ) _lowerCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case ) } _lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} _lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _lowerCAmelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn""" _lowerCAmelCase = self.lang_code_to_id[self._src_lang] _lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ) -> List[str]: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None _lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase__ ( self : List[Any] ) -> Any: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase__ ( self : int ) -> str: return self._src_lang @src_lang.setter def lowercase__ ( self : Dict , __snake_case : str ) -> 
None: _lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) _lowerCAmelCase = [1] * len(self.prefix_tokens ) _lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__snake_case )) + suffix_ones return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _lowerCAmelCase = src_lang _lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case ) _lowerCAmelCase = self.convert_tokens_to_ids(__snake_case ) _lowerCAmelCase = tgt_lang_id return inputs def lowercase__ ( self : List[Any] ) 
-> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]: return self.sp_model.encode(__snake_case , out_type=__snake_case ) def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCAmelCase = self.sp_model.PieceToId(__snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str: _lowerCAmelCase = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip() return out_string def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__snake_case ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _lowerCAmelCase = os.path.join( __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __snake_case ) elif not os.path.isfile(self.vocab_file ): with open(__snake_case , """wb""" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__snake_case ) return (out_vocab_file,) def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : 
Optional[int] , ) -> BatchEncoding: _lowerCAmelCase = src_lang _lowerCAmelCase = tgt_lang return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case ) def lowercase__ ( self : str ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def lowercase__ ( self : Dict ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase__ ( self : str , __snake_case : int ) -> None: _lowerCAmelCase = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id] def lowercase__ ( self : Any , __snake_case : str ) -> None: _lowerCAmelCase = self.lang_code_to_id[lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id]
70
1
'''simple docstring'''


def base16_encode(lowerCAmelCase):
    """Encode a bytes-like object into an uppercase Base16 (hex) string.

    Bug fixes vs. the original:
    * ``hex()`` was applied to the whole input instead of each byte, which
      raises ``TypeError`` for bytes input — hex each byte and pad to 2 digits.
    * Renamed from ``UpperCamelCase__`` so it is no longer shadowed by the
      decoder below; the module-level name ``UpperCamelCase__`` still refers
      to the decoder exactly as before, so existing callers are unaffected.
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(lowerCAmelCase)])


def UpperCamelCase__(lowerCAmelCase):
    """Decode an uppercase Base16 (RFC 4648 / RFC 3548) string into bytes.

    Raises:
        ValueError: if the input has an odd number of digits or contains
            characters outside ``0123456789ABCDEF``.
    """
    if (len(lowerCAmelCase) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid: Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(lowerCAmelCase) <= set("""0123456789ABCDEF"""):
        raise ValueError(
            """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    # Bug fix: the original indexed an undefined name ``data`` here (NameError).
    return bytes(int(lowerCAmelCase[i] + lowerCAmelCase[i + 1], 16) for i in range(0, len(lowerCAmelCase), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
'''simple docstring'''


def UpperCamelCase__(lowerCAmelCase):
    """Sort ``lowerCAmelCase`` in place with selection sort and return it.

    Repeatedly selects the smallest remaining element and swaps it into
    position. O(n^2) comparisons, O(1) extra space; handles empty and
    single-element lists.
    """
    length = len(lowerCAmelCase)
    for i in range(length - 1):
        # Find the index of the smallest element in the unsorted tail.
        least = i
        for k in range(i + 1, length):
            if lowerCAmelCase[k] < lowerCAmelCase[least]:
                least = k
        if least != i:
            lowerCAmelCase[i], lowerCAmelCase[least] = lowerCAmelCase[least], lowerCAmelCase[i]
    return lowerCAmelCase


if __name__ == "__main__":
    # Bug fix: the original referenced the undefined names ``user_input``,
    # ``unsorted`` and ``selection_sort`` here, raising NameError when run
    # as a script.
    A__ = input('''Enter numbers separated by a comma:\n''').strip()
    A__ = [int(item) for item in A__.split(''',''')]
    print(UpperCamelCase__(A__))
70
1
'''simple docstring'''
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for the Flax Stable Diffusion 2 inpainting pipeline.

    NOTE(review): local variable names in this file were mangled to
    ``_lowerCAmelCase`` while later statements still read the original names
    (``num_samples``, ``output``, ``images``, ``output_slice``, ...), and call
    arguments were replaced by the undefined name ``__snake_case`` — this test
    cannot run as-is; the original bindings must be restored from upstream.
    Both methods below also share the name ``lowercase__``, so the second
    definition shadows the first and the VRAM-cleanup hook is lost.
    """

    def lowercase__ ( self : Optional[Any] ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def lowercase__ ( self : str ) -> Optional[Any]:
        # Download the fixture init image and inpainting mask.
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        # Model repository under test.
        _lowerCAmelCase = """xvjiarui/stable-diffusion-2-inpainting"""
        _lowerCAmelCase , _lowerCAmelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(__snake_case , safety_checker=__snake_case )
        _lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
        _lowerCAmelCase = jax.random.PRNGKey(0 )
        _lowerCAmelCase = 50
        # One sample per available accelerator device.
        _lowerCAmelCase = jax.device_count()
        _lowerCAmelCase = num_samples * [prompt]
        _lowerCAmelCase = num_samples * [init_image]
        _lowerCAmelCase = num_samples * [mask_image]
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = pipeline.prepare_inputs(__snake_case , __snake_case , __snake_case )
        # shard inputs and rng
        _lowerCAmelCase = replicate(__snake_case )
        _lowerCAmelCase = jax.random.split(__snake_case , jax.device_count() )
        _lowerCAmelCase = shard(__snake_case )
        _lowerCAmelCase = shard(__snake_case )
        _lowerCAmelCase = shard(__snake_case )
        _lowerCAmelCase = pipeline(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case )
        # Gather per-device outputs back into (num_samples, 512, 512, 3).
        _lowerCAmelCase = output.images.reshape(__snake_case , 5_12 , 5_12 , 3 )
        # Compare a 3x3 corner slice of the last channel against reference values.
        _lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
        _lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowerCAmelCase = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
70
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

# Module logger (name mangled from the original).
A__ : List[str] = logging.get_logger(__name__)


def UpperCamelCase__ ( lowerCAmelCase ):
    """Coerce a single image, a video (list of images) or a batch of videos
    into the canonical nested-list-of-videos form.

    NOTE(review): the body reads the undefined name ``videos`` (the parameter
    was mangled to ``lowerCAmelCase``) — NameError at runtime; restore from
    upstream.
    """
    if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(lowerCAmelCase ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )


class UpperCAmelCase ( snake_case_ ):
    """Video image processor: resize -> center-crop -> rescale (with optional
    offset to [-1, 1]) -> normalize, applied frame-by-frame to batched videos.

    NOTE(review): this file's identifiers were mangled — the base class
    ``snake_case_`` is undefined (presumably ``BaseImageProcessor``), every
    method is named ``lowercase__`` (each shadowing the previous), parameter
    lists repeat the name ``__snake_case`` (a SyntaxError), and locals are all
    ``_lowerCAmelCase`` while later reads use the original names (``size``,
    ``image``, ``do_resize``, ...). It must be restored from upstream before
    it can run; comments below document the intended flow only.
    """

    # Keys produced by ``preprocess``.
    _lowercase: Any = ['''pixel_values''']

    def __init__( self : Tuple , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : str , ) -> None:
        super().__init__(**__snake_case )
        # Defaults: shortest edge 256 for resize, 224x224 center crop,
        # 1/255 rescale factor, ImageNet mean/std for normalization.
        _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_56}
        _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case )
        _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        _lowerCAmelCase = get_size_dict(__snake_case , param_name="""crop_size""" )
        _lowerCAmelCase = do_resize
        _lowerCAmelCase = size
        _lowerCAmelCase = do_center_crop
        _lowerCAmelCase = crop_size
        _lowerCAmelCase = resample
        _lowerCAmelCase = do_rescale
        _lowerCAmelCase = rescale_factor
        _lowerCAmelCase = offset
        _lowerCAmelCase = do_normalize
        _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowercase__ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> np.ndarray:
        # Resize either by shortest edge (keeping aspect ratio) or to an
        # explicit height/width pair.
        _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case )
        if "shortest_edge" in size:
            _lowerCAmelCase = get_resize_output_image_size(__snake_case , size["""shortest_edge"""] , default_to_square=__snake_case )
        elif "height" in size and "width" in size:
            _lowerCAmelCase = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )

    def lowercase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ) -> np.ndarray:
        # Center-crop to an explicit height/width pair.
        _lowerCAmelCase = get_size_dict(__snake_case )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case )

    def lowercase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : bool = True , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> Dict:
        # Rescale pixel values; with ``offset`` the image is first shifted by
        # half the scale so the output lands in [-1, 1] instead of [0, 1].
        # NOTE(review): ``np.floataa`` is a mangled dtype name (likely
        # ``np.float32``) — AttributeError as written.
        _lowerCAmelCase = image.astype(np.floataa )
        if offset:
            _lowerCAmelCase = image - (scale / 2)
        return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )

    def lowercase__ ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ) -> np.ndarray:
        # Channel-wise (image - mean) / std normalization.
        return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )

    def lowercase__ ( self : List[Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        # Per-frame pipeline: validate flag/argument pairs, then apply
        # resize -> center-crop -> rescale -> normalize -> channel format.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""" )
        # All transformations expect numpy arrays.
        _lowerCAmelCase = to_numpy_array(__snake_case )
        if do_resize:
            _lowerCAmelCase = self.resize(image=__snake_case , size=__snake_case , resample=__snake_case )
        if do_center_crop:
            _lowerCAmelCase = self.center_crop(__snake_case , size=__snake_case )
        if do_rescale:
            _lowerCAmelCase = self.rescale(image=__snake_case , scale=__snake_case , offset=__snake_case )
        if do_normalize:
            _lowerCAmelCase = self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case )
        _lowerCAmelCase = to_channel_dimension_format(__snake_case , __snake_case )
        return image

    def lowercase__ ( self : List[Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ) -> PIL.Image.Image:
        # Public entry point: resolve per-call overrides against the
        # instance defaults, batch the input videos, preprocess every frame,
        # and wrap the result in a BatchFeature.
        _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
        _lowerCAmelCase = resample if resample is not None else self.resample
        _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        _lowerCAmelCase = offset if offset is not None else self.offset
        _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
        _lowerCAmelCase = image_std if image_std is not None else self.image_std
        _lowerCAmelCase = size if size is not None else self.size
        _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case )
        _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
        _lowerCAmelCase = get_size_dict(__snake_case , param_name="""crop_size""" )
        if not valid_images(__snake_case ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        _lowerCAmelCase = make_batched(__snake_case )
        _lowerCAmelCase = [
            [
                self._preprocess_image(
                    image=__snake_case , do_resize=__snake_case , size=__snake_case , resample=__snake_case , do_center_crop=__snake_case , crop_size=__snake_case , do_rescale=__snake_case , rescale_factor=__snake_case , offset=__snake_case , do_normalize=__snake_case , image_mean=__snake_case , image_std=__snake_case , data_format=__snake_case , )
                for img in video
            ]
            for video in videos
        ]
        _lowerCAmelCase = {"""pixel_values""": videos}
        return BatchFeature(data=__snake_case , tensor_type=__snake_case )
70
1
'''simple docstring'''
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax all edges leaving ``v`` in one search direction.

    Updates ``cst_fwd``/``parent``/``queue`` in place for improved tentative
    costs, and returns the (possibly lowered) best known meeting-point
    distance ``shortest_distance``.

    Bug fix: this helper was also named ``UpperCamelCase__`` in the original,
    so the call site ``pass_and_relaxation(...)`` below raised NameError;
    restoring the called name is backward compatible because the old binding
    was immediately shadowed by the main function anyway.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the frontiers touch, the path through this edge is a candidate
        # for the overall shortest distance.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def UpperCamelCase__(source, destination, graph_fwd, graph_bwd):
    """Bidirectional Dijkstra: shortest distance from ``source`` to
    ``destination``, searching forward on ``graph_fwd`` and backward on
    ``graph_bwd`` (the reversed graph). Returns -1 if unreachable.

    Bug fix: the original signature repeated the same parameter name four
    times, which is a SyntaxError; parameter names are restored here.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        # Expand the cheapest node of each frontier.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_fwd,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_bwd,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        # Standard termination: frontiers can no longer improve the meeting.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


A__ = {
    '''B''': [['''C''', 1]],
    '''C''': [['''D''', 1]],
    '''D''': [['''F''', 1]],
    '''E''': [['''B''', 1], ['''G''', 2]],
    '''F''': [],
    '''G''': [['''F''', 1]],
}
A__ = {
    '''B''': [['''E''', 1]],
    '''C''': [['''B''', 1]],
    '''D''': [['''C''', 1]],
    '''F''': [['''D''', 1], ['''G''', 1]],
    '''E''': [[None, np.inf]],
    '''G''': [['''E''', 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase ( snake_case_ ):
    """Processor pairing an image processor with a tokenizer; also converts
    Donut-style ``<s_key>...</s_key>`` token sequences into nested dicts.

    NOTE(review): identifiers in this file were mangled — the base class
    ``snake_case_`` is undefined (presumably ``ProcessorMixin``), several
    signatures repeat the parameter name ``__snake_case`` (a SyntaxError),
    and locals are all ``_lowerCAmelCase`` while later reads use the original
    names (``kwargs``, ``images``, ``text``, ``start_token``, ...). It must
    be restored from upstream before it can run; comments document intent.
    """

    # Attributes managed by the processor framework.
    _lowercase: Union[str, Any] = ['''image_processor''', '''tokenizer''']
    _lowercase: int = '''AutoImageProcessor'''
    _lowercase: Optional[int] = '''AutoTokenizer'''

    def __init__( self : int , __snake_case : Tuple=None , __snake_case : Optional[int]=None , **__snake_case : Tuple ) -> List[Any]:
        # Accept the deprecated ``feature_extractor`` kwarg as an alias for
        # ``image_processor``.
        _lowerCAmelCase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , __snake_case , )
            _lowerCAmelCase = kwargs.pop("""feature_extractor""" )
        _lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(__snake_case , __snake_case )
        _lowerCAmelCase = self.image_processor
        _lowerCAmelCase = False

    def __call__( self : Dict , *__snake_case : Optional[int] , **__snake_case : Union[str, Any] ) -> Tuple:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*__snake_case , **__snake_case )
        # Route positional args / kwargs to the image processor and tokenizer,
        # merging tokenizer input_ids into the image-processor output when
        # both images and text are given.
        _lowerCAmelCase = kwargs.pop("""images""" , __snake_case )
        _lowerCAmelCase = kwargs.pop("""text""" , __snake_case )
        if len(__snake_case ) > 0:
            _lowerCAmelCase = args[0]
            _lowerCAmelCase = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            _lowerCAmelCase = self.image_processor(__snake_case , *__snake_case , **__snake_case )
        if text is not None:
            _lowerCAmelCase = self.tokenizer(__snake_case , **__snake_case )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            _lowerCAmelCase = encodings["""input_ids"""]
            return inputs

    def lowercase__ ( self : List[Any] , *__snake_case : Dict , **__snake_case : List[str] ) -> int:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__snake_case , **__snake_case )

    def lowercase__ ( self : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ) -> Any:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*__snake_case , **__snake_case )

    @contextmanager
    def lowercase__ ( self : int ) -> Optional[Any]:
        # Deprecated target-processing context: temporarily swaps the current
        # processor to the tokenizer, restoring the image processor on exit.
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        _lowerCAmelCase = True
        _lowerCAmelCase = self.tokenizer
        yield
        _lowerCAmelCase = self.image_processor
        _lowerCAmelCase = False

    def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any]=False , __snake_case : Dict=None ) -> Tuple:
        # Recursively convert a ``<s_key>...</s_key>`` token string into a
        # nested dict/list structure ("token-to-JSON").
        if added_vocab is None:
            _lowerCAmelCase = self.tokenizer.get_added_vocab()
        _lowerCAmelCase = {}
        while tokens:
            _lowerCAmelCase = re.search(R"""<s_(.*?)>""" , __snake_case , re.IGNORECASE )
            if start_token is None:
                break
            _lowerCAmelCase = start_token.group(1 )
            _lowerCAmelCase = re.search(Rf"</s_{key}>" , __snake_case , re.IGNORECASE )
            _lowerCAmelCase = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and continue scanning.
                _lowerCAmelCase = tokens.replace(__snake_case , """""" )
            else:
                _lowerCAmelCase = end_token.group()
                _lowerCAmelCase = re.escape(__snake_case )
                _lowerCAmelCase = re.escape(__snake_case )
                _lowerCAmelCase = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , __snake_case , re.IGNORECASE )
                if content is not None:
                    _lowerCAmelCase = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        _lowerCAmelCase = self.tokenajson(__snake_case , is_inner_value=__snake_case , added_vocab=__snake_case )
                        if value:
                            if len(__snake_case ) == 1:
                                _lowerCAmelCase = value[0]
                            _lowerCAmelCase = value
                    else:
                        # leaf nodes
                        _lowerCAmelCase = []
                        for leaf in content.split(R"""<sep/>""" ):
                            _lowerCAmelCase = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                _lowerCAmelCase = leaf[1:-2]  # for categorical special tokens
                            output[key].append(__snake_case )
                        if len(output[key] ) == 1:
                            _lowerCAmelCase = output[key][0]
                # Advance past the closing tag; a following <sep/> means this
                # node has siblings at the same level.
                _lowerCAmelCase = tokens[tokens.find(__snake_case ) + len(__snake_case ) :].strip()
                if tokens[:6] == r"<sep/>":
                    # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=__snake_case , added_vocab=__snake_case )
        if len(__snake_case ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
        # Deprecated alias for image_processor_class.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
        return self.image_processor_class

    @property
    def lowercase__ ( self : List[Any] ) -> Any:
        # Deprecated alias for image_processor.
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
        return self.image_processor
70
1
'''simple docstring'''


def UpperCamelCase__(input_string, pattern):
    """Return True if ``input_string`` fully matches ``pattern``.

    Supports ``.`` (any single character) and ``*`` (zero or more of the
    preceding element), evaluated with bottom-up dynamic programming.

    Bug fix: the original signature repeated the parameter name
    ``lowerCAmelCase`` twice (a SyntaxError); parameter names are restored.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of the preceding element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
    # using function to check whether given string matches the given pattern
    # Bug fix: the original called the undefined name ``match_pattern`` here.
    if UpperCamelCase__(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
70
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _lowerCAmelCase = [] for num in range(len(lowerCAmelCase ) ): _lowerCAmelCase = 0 while 2 * i * i <= odd_composites[num]: _lowerCAmelCase = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase ) == n: return list_nums return [] def UpperCamelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
70
1
"""Fast (dummy-sized) tests for the Flax AutoencoderKL model."""
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class UpperCAmelCase(FlaxModelTesterMixin, unittest.TestCase):
    # Model class exercised by the shared mixin test-suite.
    # NOTE(review): attribute name restored from the mixin's expected
    # contract (obfuscated as ``_lowercase`` with an undefined ``Any``
    # annotation) -- confirm against FlaxModelTesterMixin.
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """Deterministic dummy batch: a (4, 3, 32, 32) uniform tensor plus the PRNG key."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        # BUG FIX: was ``jax.random.uniform(__snake_case, ...)`` which is a
        # NameError -- the freshly created ``prng_key`` is the intended arg.
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs for FlaxAutoencoderKL, dummy forward inputs)."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
70
"""Parse raw DPR training data into an evaluation-set file (one question per
line) and a gold-data file (tab-joined positive-context titles per line)."""
import argparse
import json

from tqdm import tqdm


def UpperCamelCase__():
    """Read ``--src_path`` (biencoder-format JSON) and write the parsed
    ``--evaluation_set`` and ``--gold_data_path`` files."""
    parser = argparse.ArgumentParser()
    # Required parameters
    # BUG FIX: ``type=`` previously referenced the undefined name
    # ``lowerCAmelCase`` -- all three arguments are plain strings.
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    # BUG FIX: the guard previously called undefined ``main()``.
    UpperCamelCase__()
70
1
"""Interactive bullet-point selection menu for terminal prompts (CLI helper)."""
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


# True when running inside Google Colab (raw arrow-key input is unavailable
# there, so the menu falls back to typed indices).
# BUG FIX: this flag was bound to ``A__`` while ``run`` below reads ``in_colab``.
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class UpperCAmelCase:
    """A menu that lets the user pick one of ``choices`` with arrow/number keys.

    Key handlers are registered through the ``@input.mark``/``@input.register``
    machinery, which also supplies ``self.handle_input``/``self.current_selection``.
    BUG FIX: the obfuscated version dropped every ``self.`` attribute target and
    named all methods ``lowercase__`` (each shadowing the previous one); names
    are restored from the call sites below.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        # NOTE: the [] default is shared between instances but is never
        # mutated by this class, only read.
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        """Write choice ``index``, colored green where the terminal supports it."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one menu row, with the arrow marker on the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            # NOTE(review): the exact indentation width of this literal is
            # ambiguous in the source -- kept as shown.
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor ``num_spaces`` rows up or down, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return  # already at the bottom
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return  # already at the top
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        """Confirm the current choice and return its index."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, block until the user picks a row, return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab cannot deliver raw keystrokes -- read a typed index.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before returning.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
70
"""Lazy-import shim for the BigBird-Pegasus model (standard transformers pattern:
the heavy torch modules are only imported when one of their names is accessed)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it provides.
# BUG FIX: this dict (and its torch extension below) was bound to ``A__``
# while ``_LazyModule(...)`` at the bottom references ``_import_structure``.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy module must replace THIS module in sys.modules; the
    # obfuscated version assigned it to a throwaway ``A__`` name instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
1
"""The CodeEval metric: estimates pass@k on HumanEval-style problems by
executing model-generated candidate programs against provided unit tests."""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
    title={Evaluating Large Language Models Trained on Code},
    author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
    year={2021},
    eprint={2107.03374},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the canidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase(datasets.Metric):
    # BUG FIX: datasets.Metric dispatches to ``_info``/``_compute``; the
    # obfuscated ``lowercase__`` method names broke the metric contract.
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test case (in worker threads) and
        return (pass@k dict, per-task granular results)."""
        # SECURITY: executes untrusted model-generated code -- gated behind an
        # explicit opt-in environment variable.
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # per-task counter of submitted candidates
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    # BUG FIX: the submit target/args were undefined names
                    # (``__snake_case``); ``check_correctness`` is the worker.
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()  # order by completion_id for determinism
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem; returns a numpy array of estimates."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k) without overflow."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # A single sample count applies to every problem.
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
70
"""Tiled Stable Diffusion x4 upscaling: splits a large image into overlapping
tiles, upscales each tile with StableDiffusionUpscalePipeline, and blends the
results back together with linear-ramp transparency masks."""
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

# BUG FIX: the obfuscation mangled digits in identifiers (``UNetaDConditionModel``
# is not importable); the real class is UNet2DConditionModel.
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build an L-mode alpha mask that ramps from opaque (center) to
    transparent over ``overlap_pixels``, skipping any border in ``remove_borders``."""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp ``n`` to the closed interval [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    """Clamp all four (x0, y0, x1, y1) coordinates of ``rect`` into bounds."""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow ``rect`` by ``overlap`` pixels on every side, clamped to the image."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a ``original_slice``-wide strip of context (cut from the
    resized original image at ``slice_x``) to the left of ``tile``."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the (now 4x-upscaled) context strip added by ``squeeze_tile``."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Round ``n`` down to the nearest multiple of ``d``."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """StableDiffusionUpscalePipeline driven tile-by-tile so arbitrarily large
    images fit in memory. BUG FIX: the class was obfuscated to ``UpperCAmelCase``
    while ``main`` and the internal ``super(...)`` call reference this name."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile and paste it (alpha-blended) into final_image."""
        torch.manual_seed(0)  # deterministic noise per tile for seamless joins
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position of the context strip within the original image.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale ``image`` 4x, processing it in ``tile_size`` tiles and
        reporting progress to ``callback`` after each tile. Returns the
        assembled PIL image."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Demo entry point: tiled-upscale the diffusers library logo on CUDA."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # BUG FIX: the callback parameter was obfuscated away while the body
        # reads ``obj``.
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
70
1
"""Unit and slow integration tests for the CTRL model family.

BUG FIX: the obfuscation named all three classes ``UpperCAmelCase`` (each
shadowing the previous), named every method ``lowercase__`` (breaking the
unittest ``setUp``/``test_*`` contract), and dropped the ``self.`` attribute
targets in the tester's ``__init__``. Names below are restored from the
in-file call sites (``self.model_tester.prepare_config_and_inputs()`` etc.).
"""
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, head_mask, token_type_ids,
        mc_token_ids, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: int = KandinskyVaaImgaImgPipeline _lowercase: List[str] = ['''image_embeds''', '''negative_image_embeds''', '''image'''] _lowercase: Optional[int] = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] _lowercase: Tuple = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _lowercase: List[str] = False @property def lowercase__ ( self : str ) -> List[str]: return 32 @property def lowercase__ ( self : Optional[int] ) -> List[Any]: return 32 @property def lowercase__ ( self : Tuple ) -> str: return self.time_input_dim @property def lowercase__ ( self : Any ) -> Optional[int]: return self.time_input_dim * 4 @property def lowercase__ ( self : int ) -> Optional[Any]: return 1_00 @property def lowercase__ ( self : int ) -> Dict: torch.manual_seed(0 ) _lowerCAmelCase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": 
(self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase = UNetaDConditionModel(**__snake_case ) return model @property def lowercase__ ( self : Union[str, Any] ) -> Tuple: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Dict ) -> str: torch.manual_seed(0 ) _lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Optional[int] ) -> Optional[int]: _lowerCAmelCase = self.dummy_unet _lowerCAmelCase = self.dummy_movq _lowerCAmelCase = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _lowerCAmelCase = DDIMScheduler(**__snake_case ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : int , __snake_case : List[str] , __snake_case : List[Any]=0 ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __snake_case ) # create init_image _lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) 
).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) _lowerCAmelCase = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) ) _lowerCAmelCase = output.images _lowerCAmelCase = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Any ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : int ) -> Dict: 
_lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase = """A red cartoon frog, 4k""" _lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) _lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) _lowerCAmelCase = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase = pipe_prior( __snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase = pipeline( image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) _lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
70
1
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets A__ : Tuple =''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' A__ : List[str] =''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... 
sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' A__ : List[str] ='''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def lowercase__ ( self : Tuple ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def lowercase__ ( self : int , __snake_case : str , __snake_case : str , __snake_case : Any=None ) -> str: return { "matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ), }
70
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : List[Any] ) -> Union[str, Any]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__snake_case , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__snake_case , """num_attention_heads""" ) ) self.parent.assertTrue(hasattr(__snake_case , """num_encoder_blocks""" ) ) class UpperCAmelCase : def __init__( self : Optional[int] , __snake_case : str , __snake_case : Dict=13 , __snake_case : str=64 , __snake_case : Dict=3 , __snake_case : Dict=4 , __snake_case : Tuple=[2, 2, 2, 2] , __snake_case : int=[8, 4, 2, 1] , __snake_case : List[str]=[16, 32, 64, 1_28] , __snake_case : Optional[Any]=[1, 4, 8, 16] , __snake_case : Dict=[1, 2, 4, 8] , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : int="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Tuple=0.02 , __snake_case : Union[str, Any]=3 , __snake_case : Tuple=None , ) -> List[str]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_encoder_blocks _lowerCAmelCase = sr_ratios 
_lowerCAmelCase = depths _lowerCAmelCase = hidden_sizes _lowerCAmelCase = downsampling_rates _lowerCAmelCase = num_attention_heads _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = scope def lowercase__ ( self : int ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : List[Any] ) -> List[str]: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> Tuple: _lowerCAmelCase = SegformerModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = _lowerCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowercase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> List[str]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = SegformerForSemanticSegmentation(__snake_case ) model.to(__snake_case ) 
model.eval() _lowerCAmelCase = model(__snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _lowerCAmelCase = model(__snake_case , labels=__snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> List[str]: _lowerCAmelCase = 1 _lowerCAmelCase = SegformerForSemanticSegmentation(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__snake_case ) _lowerCAmelCase = model(__snake_case , labels=__snake_case ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : Optional[int] ) -> int: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) _lowercase: Tuple = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase: Tuple = True _lowercase: Union[str, Any] = False _lowercase: Dict = False _lowercase: Optional[Any] = False def lowercase__ ( self : Tuple ) -> Any: _lowerCAmelCase = SegformerModelTester(self ) _lowerCAmelCase = SegformerConfigTester(self , config_class=__snake_case ) def lowercase__ ( self : Optional[Any] ) -> Dict: self.config_tester.run_common_tests() def lowercase__ ( self : int ) 
-> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase__ ( self : Dict ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*__snake_case ) def lowercase__ ( self : Dict ) -> Dict: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*__snake_case ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> Union[str, Any]: pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def lowercase__ ( self : Optional[int] ) -> int: pass def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) def lowercase__ ( self : Tuple ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions _lowerCAmelCase = sum(self.model_tester.depths ) self.assertEqual(len(__snake_case ) , __snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
_lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first attentions (first block, first layer) _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _lowerCAmelCase = (self.model_tester.image_size // 32) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _lowerCAmelCase = len(__snake_case ) # Check attention is always last and order is fine _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) self.assertEqual(out_len + 1 , len(__snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first attentions (first block, first layer) _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowercase__ ( self : int ) -> List[str]: def check_hidden_states_output(__snake_case : str , __snake_case : Tuple , __snake_case : Optional[int] ): 
_lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = self.model_tester.num_encoder_blocks self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def lowercase__ ( self : Optional[Any] ) -> Any: if not self.model_tester.is_training: return _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(__snake_case ): continue _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.train() _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _lowerCAmelCase = model(**__snake_case ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : Tuple ) -> Dict: pass @slow def lowercase__ ( self : str ) -> Optional[int]: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = SegformerModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : Union[str, Any] ) -> Any: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , __snake_case ) _lowerCAmelCase = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) ) @slow def lowercase__ ( self : Optional[Any] ) -> Any: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = torch.Size((1, 
model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , __snake_case ) _lowerCAmelCase = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) ) @slow def lowercase__ ( self : Any ) -> str: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = outputs.logits.detach().cpu() _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] ) _lowerCAmelCase = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , __snake_case ) _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case ) _lowerCAmelCase = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , __snake_case )
70
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] =logging.get_logger(__name__) A__ : List[Any] ={ '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class UpperCAmelCase ( snake_case_ ): _lowercase: int = '''markuplm''' def __init__( self : Dict , __snake_case : Union[str, Any]=3_05_22 , __snake_case : Tuple=7_68 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : str=30_72 , __snake_case : List[Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=5_12 , __snake_case : List[str]=2 , __snake_case : int=0.02 , __snake_case : List[str]=1E-1_2 , __snake_case : Optional[int]=0 , __snake_case : str=0 , __snake_case : Tuple=2 , __snake_case : Tuple=2_56 , __snake_case : int=10_24 , __snake_case : Union[str, Any]=2_16 , __snake_case : List[str]=10_01 , __snake_case : Any=32 , __snake_case : Tuple=50 , __snake_case : str="absolute" , __snake_case : Tuple=True , __snake_case : List[Any]=None , **__snake_case : Union[str, Any] , ) -> List[Any]: super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = use_cache _lowerCAmelCase = classifier_dropout # additional properties _lowerCAmelCase = max_depth _lowerCAmelCase = 
max_xpath_tag_unit_embeddings _lowerCAmelCase = max_xpath_subs_unit_embeddings _lowerCAmelCase = tag_pad_id _lowerCAmelCase = subs_pad_id _lowerCAmelCase = xpath_unit_hidden_size
70
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCAmelCase : _lowercase: List[str] _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''Translation''' , init=snake_case_ , repr=snake_case_ ) def __call__( self : Optional[int] ) -> Optional[int]: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowercase__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCAmelCase : _lowercase: Optional[List] = None _lowercase: Optional[int] = None _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''TranslationVariableLanguages''' , init=snake_case_ , repr=snake_case_ ) def lowercase__ ( self : Any ) -> Optional[Any]: _lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None _lowerCAmelCase = len(self.languages ) if self.languages else None def __call__( self : List[str] ) -> Optional[Any]: return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Any: _lowerCAmelCase = set(self.languages ) if self.languages and set(__snake_case ) - lang_set: raise ValueError( f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. 
_lowerCAmelCase = [] for lang, text in translation_dict.items(): if isinstance(__snake_case , __snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _lowerCAmelCase , _lowerCAmelCase = zip(*sorted(__snake_case ) ) return {"language": languages, "translation": translations} def lowercase__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) A__ : Optional[Any] ={ '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =[ '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegaForCausalLM''', '''MegaForMaskedLM''', '''MegaForMultipleChoice''', '''MegaForQuestionAnswering''', '''MegaForSequenceClassification''', '''MegaForTokenClassification''', '''MegaModel''', '''MegaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys A__ : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer

logger = logging.get_logger(__name__)

# These four module constants are referenced by the class attributes below;
# previously they were bound to throwaway `A__` names, which made the class
# body raise NameError at import time.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the HuggingFace `tokenizers` library.

    BUG FIXES vs. the previous revision: all methods were named `lowercase__`
    (later defs shadowed earlier ones), several signatures used the same
    parameter name twice (a SyntaxError), and the assignment targets in
    `__getstate__` / `build_inputs_with_special_tokens` were lost. Method and
    parameter names now match the `PreTrainedTokenizerFast` API that callers
    rely on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer if its serialized state disagrees
        # with the options requested here (e.g. a tokenizer.json saved with a
        # different `do_lower_case`).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain
        # BertPreTokenizer for serialization and restore it in __setstate__.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap the unserializable custom pre-tokenizer for a standard one
        # before writing tokenizer.json to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
70
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor

logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Use `SegformerImageProcessor` instead; this subclass only emits a
    deprecation warning and forwards to the parent constructor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # BUG FIX: previously the forwarded *args tuple was passed as the
        # warning category (which is a TypeError); the category must be
        # FutureWarning, matching the deprecation message.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
70
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Optional[int] = StableDiffusionControlNetImgaImgPipeline _lowercase: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} _lowercase: str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowercase: Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) _lowercase: Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self : List[str] ) -> List[str]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , 
layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _lowerCAmelCase = CLIPTextModel(__snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowercase__ ( self : Any , __snake_case : str , __snake_case : Any=0 ) -> str: if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = 2 _lowerCAmelCase = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ) _lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" 
).resize((64, 64) ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowercase__ ( self : Optional[int] ) -> List[Any]: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Tuple ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowercase__ ( self : Tuple ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = StableDiffusionControlNetImgaImgPipeline _lowercase: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} _lowercase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowercase: Any = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__snake_case : Optional[Any] ): if isinstance(__snake_case , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , 
conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) _lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _lowerCAmelCase = CLIPTextModel(__snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] ) _lowerCAmelCase = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowercase__ ( self : Tuple , __snake_case : int , __snake_case : List[str]=0 ) -> Union[str, Any]: if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = 2 _lowerCAmelCase = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), ] _lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((64, 64) ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowercase__ ( self : List[str] ) -> Dict: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) _lowerCAmelCase = 10.0 _lowerCAmelCase = 4 _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] _lowerCAmelCase = self.get_dummy_inputs(__snake_case ) _lowerCAmelCase = steps _lowerCAmelCase = scale _lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def 
lowercase__ ( self : int ) -> str: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Optional[Any] ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowercase__ ( self : int ) -> str: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Union[str, Any] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Any: _lowerCAmelCase = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) _lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__snake_case , controlnet=__snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase = """evil space-punk bird""" _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_12, 5_12) ) _lowerCAmelCase = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_12, 5_12) ) _lowerCAmelCase = pipe( __snake_case , __snake_case , 
control_image=__snake_case , generator=__snake_case , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) _lowerCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
70
1
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCAmelCase ( snake_case_ ): _lowercase: str = '''char''' _lowercase: Tuple = '''bpe''' _lowercase: List[Any] = '''wp''' A__ : List[Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCAmelCase ( snake_case_ ): _lowercase: Tuple = ['''image_processor''', '''char_tokenizer'''] _lowercase: Union[str, Any] = '''ViTImageProcessor''' _lowercase: Any = '''MgpstrTokenizer''' def __init__( self : List[Any] , __snake_case : Dict=None , __snake_case : Tuple=None , **__snake_case : Tuple ) -> int: _lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __snake_case , ) _lowerCAmelCase = kwargs.pop("""feature_extractor""" ) _lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) _lowerCAmelCase = tokenizer _lowerCAmelCase = AutoTokenizer.from_pretrained("""gpt2""" ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(__snake_case , __snake_case ) def __call__( self : str , __snake_case : Any=None , __snake_case : str=None , __snake_case : str=None , **__snake_case : List[Any] ) -> List[Any]: if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _lowerCAmelCase = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is not None: _lowerCAmelCase = 
self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is None: return inputs elif images is None: return encodings else: _lowerCAmelCase = encodings["""input_ids"""] return inputs def lowercase__ ( self : Optional[Any] , __snake_case : str ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = sequences _lowerCAmelCase = char_preds.size(0 ) _lowerCAmelCase , _lowerCAmelCase = self._decode_helper(__snake_case , """char""" ) _lowerCAmelCase , _lowerCAmelCase = self._decode_helper(__snake_case , """bpe""" ) _lowerCAmelCase , _lowerCAmelCase = self._decode_helper(__snake_case , """wp""" ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i in range(__snake_case ): _lowerCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCAmelCase = scores.index(max(__snake_case ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCAmelCase = {} _lowerCAmelCase = final_strs _lowerCAmelCase = final_scores _lowerCAmelCase = char_strs _lowerCAmelCase = bpe_strs _lowerCAmelCase = wp_strs return out def lowercase__ ( self : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> List[str]: if format == DecodeType.CHARACTER: _lowerCAmelCase = self.char_decode _lowerCAmelCase = 1 _lowerCAmelCase = """[s]""" elif format == DecodeType.BPE: _lowerCAmelCase = self.bpe_decode _lowerCAmelCase = 2 _lowerCAmelCase = """#""" elif format == DecodeType.WORDPIECE: _lowerCAmelCase = self.wp_decode _lowerCAmelCase = 1_02 _lowerCAmelCase = """[SEP]""" else: raise ValueError(f"Format {format} is not supported." 
) _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = pred_logits.size(0 ) _lowerCAmelCase = pred_logits.size(1 ) _lowerCAmelCase , _lowerCAmelCase = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case ) _lowerCAmelCase = preds_index.view(-1 , __snake_case )[:, 1:] _lowerCAmelCase = decoder(__snake_case ) _lowerCAmelCase , _lowerCAmelCase = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 ) _lowerCAmelCase = preds_max_prob[:, 1:] for index in range(__snake_case ): _lowerCAmelCase = preds_str[index].find(__snake_case ) _lowerCAmelCase = preds_str[index][:pred_eos] _lowerCAmelCase = preds_index[index].cpu().tolist() _lowerCAmelCase = pred_index.index(__snake_case ) if eos_token in pred_index else -1 _lowerCAmelCase = preds_max_prob[index][: pred_eos_index + 1] _lowerCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__snake_case ) conf_scores.append(__snake_case ) return dec_strs, conf_scores def lowercase__ ( self : List[Any] , __snake_case : List[str] ) -> Optional[int]: _lowerCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__snake_case )] return decode_strs def lowercase__ ( self : List[str] , __snake_case : Optional[int] ) -> str: return self.bpe_tokenizer.batch_decode(__snake_case ) def lowercase__ ( self : Union[str, Any] , __snake_case : List[str] ) -> Optional[int]: _lowerCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__snake_case )] return decode_strs
70
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A__ : List[Any] =logging.get_logger(__name__) A__ : Any =torch.device('''cpu''') def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return im def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for k in state_dict.keys(): _lowerCAmelCase = k if ".pwconv" in k: _lowerCAmelCase = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _lowerCAmelCase = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." 
in k: _lowerCAmelCase = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _lowerCAmelCase = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _lowerCAmelCase = k_new.split(""".""" ) if ls[2].isdigit(): _lowerCAmelCase = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _lowerCAmelCase = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _lowerCAmelCase = 10_00 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _lowerCAmelCase = [3, 3, 6, 4] _lowerCAmelCase = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": _lowerCAmelCase = [3, 3, 9, 6] _lowerCAmelCase = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": _lowerCAmelCase = [4, 3, 10, 5] _lowerCAmelCase = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": _lowerCAmelCase = [4, 4, 12, 6] _lowerCAmelCase = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , check_hash=lowerCAmelCase ) else: _lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" ) 
_lowerCAmelCase = checkpoint _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # load HuggingFace model _lowerCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase ).eval() hf_model.load_state_dict(lowerCAmelCase ) # prepare test inputs _lowerCAmelCase = prepare_img() _lowerCAmelCase = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""pt""" ) # compare outputs from both models _lowerCAmelCase = get_expected_output(lowerCAmelCase ) _lowerCAmelCase = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase , atol=1e-3 ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": A__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') A__ : Tuple =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
70
1
'''simple docstring'''


def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* with *tax_rate* applied.

    Args:
        price: The pre-tax price.
        tax_rate: The tax rate as a fraction (e.g. 0.25 for 25%).

    Returns:
        The total price including tax.

    >>> price_plus_tax(100, 0.25)
    125.0

    BUG FIX: the function previously declared the same parameter name twice
    (a SyntaxError) and the __main__ block called the undefined name
    `price_plus_tax`; the signature is restored to match its callers.
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(1_00, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
70
'''simple docstring'''
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

# Mark every test in this module as an integration test (network access).
# BUG FIX: this marker and every test below used throwaway/duplicated names
# (`A__`, repeated `lowerCAmelCase` parameters), which was a SyntaxError and
# broke the pytest.mark.parametrize id <-> argument-name matching. Names are
# restored so pytest can inject the fixtures/parameters.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    # inspect_dataset should copy the loading script into tmp_path.
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # Missing config_name for a multi-config dataset must raise.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
70
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Map of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: this list was previously bound to a throwaway variable, so the
    # lazy module never exposed BartphoTokenizer and the final _LazyModule
    # call referenced an undefined `_import_structure` name.
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
'''simple docstring'''
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return a torch activation module for the given name.

    Args:
        act_fn: One of "swish", "silu", "mish", or "gelu".

    Returns:
        A freshly constructed activation `nn.Module`.

    Raises:
        ValueError: If `act_fn` is not a supported activation name.

    BUG FIX: the previous signature named its parameter `lowerCAmelCase`
    while the body read `act_fn`, so every call raised NameError.
    """
    # "swish" and "silu" are two names for the same x * sigmoid(x) activation.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
70
1
'''simple docstring''' from __future__ import annotations import requests A__ : Optional[Any] =set( '''approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports'''.split() ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase = 1 , lowerCAmelCase = "new" , lowerCAmelCase = None ): """simple docstring""" _lowerCAmelCase = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowerCAmelCase ) - valid_terms ) ): _lowerCAmelCase = f"Invalid search term: {invalid_search_terms}" raise ValueError(lowerCAmelCase ) _lowerCAmelCase = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"""User-agent""": """A random string"""} , ) if response.status_code == 4_29: raise requests.HTTPError _lowerCAmelCase = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowerCAmelCase )} _lowerCAmelCase = {} for id_ in range(lowerCAmelCase ): _lowerCAmelCase = { item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
70
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# BUG FIX: the package version was previously bound to a throwaway `A__`
# name; it must be exposed as `__version__`.
__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version

if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# BUG FIX: re-attach public names onto the deprecated module paths so old
# import locations keep working. Previously these were assignments to
# throwaway `A__` variables, which left the deprecated modules unpatched.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__ : int ={ '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : List[Any] =['''ChineseCLIPFeatureExtractor'''] A__ : Tuple =['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =[ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys A__ : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : Tuple ={ '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int =['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =[ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__ : str ={'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =['''YolosFeatureExtractor'''] A__ : Optional[Any] =['''YolosImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] =[ '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''YolosForObjectDetection''', '''YolosModel''', '''YolosPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
"""Compute binomial coefficients with Pascal's rule."""


def binomial_coefficient(n, r):
    """Return C(n, r), the number of ways to choose ``r`` items from ``n``.

    Uses the additive Pascal's-rule recurrence on a single row buffer:
    O(n * r) time, O(r) extra space. Returns 0 when ``r > n`` (the row
    entry is never reached and stays at its initial 0).

    :param n: total number of items (non-negative int)
    :param r: number of items chosen (non-negative int)
    :return: the binomial coefficient C(n, r)
    """
    # row[j] will hold C(i, j) after processing row i; row[r+1:] is never needed
    row = [0 for _ in range(r + 1)]
    row[0] = 1  # nC0 = 1 for every n

    for i in range(1, n + 1):
        # Update in place from the previous row; iterate right-to-left so
        # each row[j - 1] read is still the *previous* row's value.
        j = min(i, r)
        while j > 0:
            row[j] += row[j - 1]
            j -= 1

    return row[r]


print(binomial_coefficient(n=10, r=5))
70
1
"""Convert a trax Reformer pickle checkpoint into a PyTorch Reformer model."""
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) into ``torch_layer``, after a shape check."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load one trax LSH-attention layer (query_key, value, out dense) into torch."""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    # trax stores per-head matrices; fold heads into the hidden dimension
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load one trax local-attention layer (separate query/key/value, out dense) into torch."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax reversible block (attention + feed-forward) into a torch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # attention weights + output projection; LSH layers carry 3 matrices, local layers 4
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward wraps the actual weights one level deeper
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load the full trax weight tree into ``torch_model`` (a ReformerModelWithLMHead)."""
    torch_model_reformer = torch_model.reformer

    # word embeddings
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        # axial position embeddings are stored as a tuple of factorized tables
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # each torch block corresponds to 4 consecutive trax entries
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings (LM head decoder)
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a Reformer from ``config_file``, load the trax pickle, and save a torch state dict.

    :param trax_model_pkl_path: path to the pickled trax checkpoint (expects a "weights" key)
    :param config_file: path to the ReformerConfig JSON describing the architecture
    :param pytorch_dump_path: where to write the converted ``state_dict``
    """
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
70
"""Packaged dataset builder that reads pickled pandas DataFrames into Arrow tables."""
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the Pandas packaged builder."""

    # Optional explicit schema; when None the schema is inferred from the data
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Expose the (possibly None) configured features as the dataset info."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Map ``data_files`` (str, list/tuple, or split->files dict) to split generators.

        Raises:
            ValueError: if no data files were configured.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # A bare path or flat list means a single TRAIN split
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a table to the configured features' schema, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
70
1
"""Entry point for the ``transformers-cli`` command-line tool."""
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Parse the CLI arguments, dispatch to the selected sub-command, and run it."""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands: each sub-command attaches its own parser and a `func` factory
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    # No sub-command selected -> show usage and fail
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
70
"""Prim's minimum-spanning-tree algorithm, with a linear-scan and a heap-based variant."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex ordered by its current Prim key (distance to the growing tree)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # current best edge weight connecting this vertex to the tree
        self.pi = None  # parent vertex in the MST, set by prim()/prim_heap()
        self.neighbors = []
        self.edges = {}  # {neighbor vertex id: edge weight}

    def __lt__(self, other):
        # Ordering by key lets min() and heapq pick the closest fringe vertex
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record ``vertex`` as adjacent to this vertex."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Set the weight of the edge from this vertex to ``vertex``."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected edge of weight ``edge`` between 1-indexed vertices ``a`` and ``b``."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Run Prim's algorithm with a linear-scan minimum (O(V^2)).

    :param graph: list of connected Vertex objects
    :param root: the Vertex to grow the tree from
    :return: list of MST edges as (child, parent) pairs of 1-indexed labels
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            # Relax: v is still outside the tree and u offers a cheaper connection
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root):
    """Run Prim's algorithm with a binary heap (O(E log V)); yields (child, parent) pairs.

    :param graph: list of connected Vertex objects
    :param root: the Vertex to grow the tree from
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # keys changed, so restore the heap invariant
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector():
    """Placeholder for doctest-driven examples of prim()/prim_heap()."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
1
"""End-to-end smoke tests for the Flax example training scripts."""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


# Make the example scripts importable as top-level modules
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` CLI flag (used by some test launchers)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` from ``output_dir``; raise ValueError if missing."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        """Train a tiny GLUE classifier and check eval accuracy."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        """Train a tiny causal LM and check eval perplexity."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        """Train a tiny summarization model and check ROUGE on the test split."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        """Train a tiny masked LM and check eval perplexity."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        """Train a tiny T5 span-corruption model and check eval accuracy."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        """Train a tiny NER model and check accuracy/F1."""
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        """Train a tiny QA model and check F1/exact-match."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
70
"""Slow integration test checking a reference loss for google/mt5-small."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Compare the mtf-style score of mt5-small on a fixed pair against a reference value.

        For comparison run the original mesh-tf checkpoint and compute
        ``-(num_target_tokens * loss)`` on the same (input, target) pair.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean over target tokens; rescale to a total log-likelihood style score
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
70
1
"""Processor combining an image processor and a tokenizer, with token-sequence -> JSON decoding."""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Wrap an image processor and a tokenizer; accepts deprecated ``feature_extractor`` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process images and/or text; returns image features, encodings, or both combined."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # attach the tokenized text as decoder labels on the image features
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route plain calls to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence with ``<s_key>...</s_key>`` tags into JSON.

        :param tokens: the generated string to parse
        :param is_inner_value: True when parsing a nested value (returns a list)
        :param added_vocab: tokenizer's added vocab, used to unwrap categorical special tokens
        :return: dict (or list when ``is_inner_value``) mirroring the tag structure
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # unbalanced opening tag: drop it and keep scanning
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
70
"""SentencePiece-based tokenizer for the NLLB-200 models."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    """NLLB tokenizer backed by a SentencePiece BPE model.

    Adds fairseq-style special tokens and per-language codes; sequences are
    framed as ``[lang_code] tokens [eos]`` (or ``tokens [eos, lang_code]``
    when ``legacy_behaviour`` is enabled).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab
        # and position 3 in the spm vocab.
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; serialize the proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask flagging special tokens in the built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency.
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token types; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generation."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (pieces) back to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix to the source-language convention.

        Non-legacy: prefix=[src_lang_code], suffix=[eos].
        Legacy: prefix=[], suffix=[eos, src_lang_code].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset prefix/suffix to the target-language convention (mirrors src)."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
70
1
"""Configuration classes for the ResNet model family."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ResNet model / backbone.

    Stores architecture hyper-parameters (channel sizes, stage depths, block
    type) and the backbone output selection (`out_features` / `out_indices`).
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # "stem" plus one named stage per entry in `depths`.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    # Minimum torch version required for ONNX export of this architecture.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported ONNX outputs.
        return 1e-3
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = len(lowerCAmelCase ) for i in range(length - 1 ): _lowerCAmelCase = i for k in range(i + 1 , lowerCAmelCase ): if collection[k] < collection[least]: _lowerCAmelCase = k if least != i: _lowerCAmelCase , _lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": A__ : str =input('''Enter numbers separated by a comma:\n''').strip() A__ : Optional[int] =[int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
70
1
"""Tile-based wrapper around the Stable Diffusion x4 upscale pipeline."""
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build a uint8 alpha mask that ramps to 0 at tile borders.

    Borders listed in *remove_borders* ("l"/"r"/"t"/"b") keep full opacity
    (they sit at the image edge and have no neighbour to blend with).
    """
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp *n* into [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_xy, max_xy):
    """Clamp an (x0, y0, x1, y1) rect into the [min_xy, max_xy] box."""
    return (
        clamp(rect[0], min_xy[0], max_xy[0]),
        clamp(rect[1], min_xy[1], max_xy[1]),
        clamp(rect[2], min_xy[0], max_xy[0]),
        clamp(rect[3], min_xy[1], max_xy[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow *rect* by *overlap* pixels on every side, clamped to the image."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a slice of the (downscaled) original image to the tile's left."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the context slice that squeeze_tile added (scaled x4 by the model)."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Largest multiple of *d* that is <= *n*."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Upscale arbitrarily large images by running the x4 upscaler tile by tile
    and alpha-blending the tiles back together with linear-ramp masks.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale one tile and paste it (alpha-blended) into *final_image*."""
        torch.manual_seed(0)  # deterministic noise per tile so seams match
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Run tiled x4 upscaling; returns the assembled PIL image."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo upscale (requires a CUDA device and network access).
    sd_model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(sd_model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
70
"""Image processor for video models (resize / crop / offset-rescale / normalize)."""
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a list of videos (each a list of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Preprocesses videos frame by frame: resize, center-crop, rescale
    (optionally with a -scale/2 offset so values are centred), and normalize.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size` (either shortest-edge or exact height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to exactly `size['height']` x `size['width']`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale`, subtracting scale/2 first when `offset`."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Preprocess a single frame; validates that each enabled step has its args."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos; returns a BatchFeature of pixel_values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
70
1
'''simple docstring''' import sys A__ : Tuple =( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def UpperCamelCase__ ( lowerCAmelCase = N ): """simple docstring""" _lowerCAmelCase = -sys.maxsize - 1 for i in range(len(lowerCAmelCase ) - 12 ): _lowerCAmelCase = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _lowerCAmelCase = product return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
70
"""Processor pairing an AutoImageProcessor with an AutoTokenizer (Donut-style)."""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor.

    Also provides ``tokenajson`` to convert generated token sequences into
    an ordered JSON-like structure.
    """

    # ProcessorMixin contract: names of the wrapped attributes and the
    # auto classes used to (de)serialize them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated feature_extractor when no image_processor given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process images and/or text; inside `as_target_processor` delegates directly."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Tokenized text becomes the training labels for the image inputs.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route ``__call__`` to the tokenizer for label processing."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string with <s_key>...</s_key> markers to JSON.

        Returns a dict (or a list of dicts for repeated <sep/>-joined groups);
        falls back to ``{"text_sequence": tokens}`` when nothing parses.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unbalanced opener: drop it and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias of ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias of ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
70
1
"""Tree sort: insert into a binary search tree, then read it back in order."""


class Node:
    """A binary search tree node holding a single value."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* below this node; duplicates are kept only once.

        Compares explicitly instead of testing ``self.val`` for truthiness,
        so a root value of 0 (or any falsy value) is not silently overwritten.
        """
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)
        # val == self.val: already present, nothing to do.


def inorder(root, res):
    """Append the values of the BST rooted at *root* to *res* in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Return the elements of *arr* in ascending order (duplicates collapsed)."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
70
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _lowerCAmelCase = [] for num in range(len(lowerCAmelCase ) ): _lowerCAmelCase = 0 while 2 * i * i <= odd_composites[num]: _lowerCAmelCase = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase ) == n: return list_nums return [] def UpperCamelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
70
1
"""Lazy import structure for the data2vec model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
"""Parse raw DPR training data into an evaluation set and a gold data file."""
import argparse
import json

from tqdm import tqdm


def main():
    """Read `--src_path` (raw DPR JSON) and write one question per line to
    `--evaluation_set` and the tab-joined positive-context titles per line
    to `--gold_data_path`.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
70
1
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, 
wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
70
"""Lazy import structure for BigBird-Pegasus."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
1
"""Deprecated feature-extractor alias for GLPN."""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of GLPNImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
70
"""Tiled Stable Diffusion x4 upscaling: upscale an image tile-by-tile and blend seams."""
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask of *size* with a linear ramp of width
    *overlap_pixels* on every border not listed in *remove_borders*
    ("l"/"r"/"t"/"b"). Uses None instead of a mutable [] default.
    """
    if remove_borders is None:
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp *n* into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_xy, max_xy):
    """Clamp an (x0, y0, x1, y1) rect to the [min_xy, max_xy] bounds."""
    return (
        clamp(rect[0], min_xy[0], max_xy[0]),
        clamp(rect[1], min_xy[1], max_xy[1]),
        clamp(rect[2], min_xy[0], max_xy[0]),
        clamp(rect[3], min_xy[1], max_xy[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow *rect* by *overlap* pixels on every side, clamped to the image."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a slice of the (downsized) original image to the left of *tile*."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the context slice (scaled x4) that squeeze_tile prepended."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Round *n* down to the nearest multiple of *d*."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Upscale arbitrarily large images with the x4 upscaler by processing
    overlapping tiles and alpha-blending them into the final canvas.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale one (x, y) tile and paste it into *final_image* with a blend mask."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position of this tile's centre, mapped into tile coordinates.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)

        # Keep a hard edge on sides that coincide with the image border.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")

        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale *image* x4 tile-by-tile and return the assembled PIL image.

        *callback*, when given, is invoked after each tile with a dict holding
        the fraction done ("progress") and the partially assembled "image".
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0

        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Demo: upscale the diffusers library logo with progress callbacks (needs CUDA)."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
70
1
"""Lazy import structure for BERTweet."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
"""Tests for the Kandinsky 2.2 image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests with tiny dummy components."""

    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = ["image_embeds", "negative_image_embeds", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against reference outputs."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
70
1
"""Introsort: quicksort that falls back to heapsort at depth limit and to
insertion sort on small slices."""
import math


def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place with insertion sort; return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the slot for the value is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift array[index] down so array[:heap_size] satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort *array* in place with heapsort; return the array."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, pivot, low, high):
    """Hoare-style partition of array[low:high] around *pivot*; return split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort *array* in place with introsort; return the array."""
    if len(array) == 0:
        return array
    # Depth limit: 2 * ceil(log2(n)); below 16 elements insertion sort takes over.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Too many quicksort levels: guarantee O(n log n) with heapsort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, pivot, start, end)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
70
"""Test suite for the PyTorch SegFormer model (config, modeling, integration).

Restored from an obfuscated revision in which every class was named
``UpperCAmelCase`` and every method ``lowercase__`` (later definitions
silently shadowed earlier ones, so most tests could never run).
"""
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    """Builds tiny SegFormer configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is downsampled by the last rate times 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # Base models in MODEL_MAPPING produce no loss; skip them.
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used by integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
70
1
"""PyTest smoke tests for the ``digital_image_processing`` package.

Restored from an obfuscated revision: the imports were garbled
(``cva`` -> ``cv2``, ``uinta`` -> ``uint8``) and every test function shared
one placeholder name, so only the last definition survived at import time.
"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena test image in BGR and grayscale.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals kernel
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path="digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
70
"""Translation feature types for machine-translation datasets.

Restored from an obfuscated revision in which both classes and all dataclass
fields/methods shared placeholder names (later definitions shadowed earlier
ones, breaking the dataclass field layout entirely).
"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed per-example set of languages.

    Each example maps every language code in ``languages`` to one string.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # One pa.string() struct field per language; sorted for a stable schema.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the struct into one Value("string") column per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations whose language set varies per example.

    Encoded examples hold parallel ``language`` / ``translation`` sequences,
    sorted by language code; a language may repeat when several alternative
    translations exist for it.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, duplicate-free language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate and normalize one example into parallel sorted sequences.

        Raises ValueError when the example uses a language outside the
        declared set (only checked when ``languages`` was provided).
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into separate language / translation sequence columns."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
70
1
'''simple docstring''' from __future__ import annotations def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" if len(lowerCAmelCase ) == 0: raise ValueError("""find_max() arg is an empty sequence""" ) if ( left >= len(lowerCAmelCase ) or left < -len(lowerCAmelCase ) or right >= len(lowerCAmelCase ) or right < -len(lowerCAmelCase ) ): raise IndexError("""list index out of range""" ) if left == right: return nums[left] _lowerCAmelCase = (left + right) >> 1 # the middle _lowerCAmelCase = find_max(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # find max in range[left, mid] _lowerCAmelCase = find_max(lowerCAmelCase , mid + 1 , lowerCAmelCase ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
70
"""Fast tokenization class for RoFormer (jieba-based Chinese pre-tokenizer).

Restored from an obfuscated revision: the class and all methods carried
colliding placeholder names, and the module-level constants were anonymous.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace *tokenizers*, using a
    custom jieba pre-tokenizer for Chinese text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer when the serialized tokenizer was
        # built with different lowercase/strip_accents settings.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom jieba pre-tokenizer is not picklable; swap in the plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Serialize with the plain BertPreTokenizer (custom ones can't be saved).
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
1
"""OwlViT model configuration classes (text, vision, combined, ONNX export).

Restored from an obfuscated revision in which all three config classes and
the ONNX config shared colliding placeholder names.
"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    """Configuration of the OwlViT text encoder."""

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    """Configuration of the OwlViT vision encoder."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    """Combined text + vision configuration for the full OwlViT model."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Build an OwlViTConfig from separate text and vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OwlViT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Merge dummy text inputs (tokenizer) with dummy image inputs.
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
70
"""Fast and slow tests for the Stable Diffusion ControlNet img2img pipeline."""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny UNet/ControlNet/VAE/CLIP stack for fast CPU testing."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/control-image inputs for the tiny pipeline."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Same tiny stack as above, but with two ControlNets wrapped in MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-initialize the zero-convs so the two ControlNets produce non-zero,
            # distinguishable outputs in the guidance-switch test.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        """End-to-end img2img with the real canny ControlNet against a reference image."""
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
70
1
"""Catalan numbers via dynamic programming, with a small interactive driver."""


def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) as a list.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    >>> catalan_numbers(2)
    [1, 1, 2]

    Raises:
        ValueError: if *upper_limit* is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
70
"""Convert original SwiftFormer checkpoints to the Hugging Face format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for the given SwiftFormer variant."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map original SwiftFormer checkpoint keys to their HF-model equivalents."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # Stage sub-blocks live at network.<stage>.<block>.<rest>.
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert *original_ckpt* to HF format, verify logits, and save the model."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
70
1
"""Tiny Giphy search client."""
import requests

# Get a personal key at https://developers.giphy.com/ and paste it here.
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the list of GIF URLs that the Giphy search API finds for *query*.

    Raises:
        requests.RequestException: on network failure; KeyError if the API
        response has no ``data`` field (e.g. invalid API key).
    """
    # Giphy expects '+'-separated search terms in the query string.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
70
"""Integration tests for the `datasets` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Every test in this module hits the Hub, so mark them all as integration tests.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # A multi-config dataset without an explicit config name must fail.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
70
1
"""A two-cluster self-organizing (Kohonen) map trained on 4-D binary samples."""
import math


class SelfOrganizingMap:
    """Competitive-learning network with two weight vectors (one per cluster)."""

    def get_winner(self, weights: "list[list[float]]", sample: "list[int]") -> int:
        """Return the index of the winning cluster for *sample*.

        NOTE(review): the original returned from inside the loop after the
        first component, so only index 0 was ever compared; the full squared
        Euclidean distances are now accumulated before comparing. The
        comparison polarity (returning 0 when cluster 0's distance is larger)
        is kept from the original source — confirm it is intended.
        """
        distance_0 = 0.0
        distance_1 = 0.0
        for i in range(len(sample)):
            distance_0 += math.pow((sample[i] - weights[0][i]), 2)
            distance_1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if distance_0 > distance_1 else 1

    def update(
        self, weights: "list[list[int | float]]", sample: "list[int]", j: int, alpha: float
    ) -> "list[list[int | float]]":
        """Move every component of weight row *j* toward *sample* by rate *alpha*.

        The original iterated ``range(len(weights))`` (the number of rows),
        which only updated the first two of four components; the loop now
        covers the full length of the winning row.
        """
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train on four binary samples for three epochs, then classify a probe."""
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
70
"""Factory mapping activation-function names to ``torch.nn`` modules."""
from torch import nn


def UpperCamelCase__(act_fn):
    """Return a fresh ``torch.nn`` activation module for the given name.

    Args:
        act_fn: one of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``
            (``"swish"`` and ``"silu"`` are synonyms, both mapping to SiLU).

    Raises:
        ValueError: for any unrecognized activation name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
70
1
"""Lazy-import structure for the MVP model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Base import structure; optional backends are appended below when available.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves
    # attributes from the submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
"""Top-level `datasets` package: version/runtime checks and the public API."""

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


# Hard runtime requirements: Python >= 3.7 and pyarrow >= 8.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-export a few names on their historical (deprecated) module locations so
# old import paths keep working.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
70
1
'''Project Euler 75: count perimeters that admit exactly one integer right triangle.'''
from collections import defaultdict
from math import gcd


def UpperCamelCase__(lowerCAmelCase=1_500_000):
    """Count perimeters p <= `lowerCAmelCase` formed by exactly one right triangle.

    Uses Euclid's formula: every primitive Pythagorean triple comes from
    coprime m > n of opposite parity, with perimeter 2*m*(m + n). Each
    primitive perimeter and all its multiples up to the limit are tallied,
    then perimeters with exactly one tally are counted.

    :param lowerCAmelCase: inclusive upper bound on the perimeter.
    :return: number of perimeters achievable by exactly one triangle.
    """
    frequencies = defaultdict(int)  # perimeter -> number of distinct triangles
    euclid_m = 2
    # 2*m*(m+1) is the smallest perimeter producible with this m (n = 1),
    # so once it exceeds the limit no larger m can contribute.
    while 2 * euclid_m * (euclid_m + 1) <= lowerCAmelCase:
        # Start at 1 for even m, 2 for odd m; step 2 keeps m and n of opposite
        # parity, which (with gcd == 1) guarantees a primitive triple.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Count the primitive triple and every scaled copy within the limit.
            for perimeter in range(primitive_perimeter, lowerCAmelCase + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(F"""{UpperCamelCase__() = }""")
70
'''Lazy import structure for the MVP model (configuration, tokenizers, PyTorch modeling).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> the public names it exports; consumed by `_LazyModule` below.
A__ : Tuple ={
    '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
    '''tokenization_mvp''': ['''MvpTokenizer'''],
}

# The fast tokenizer is only exported when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : int =['''MvpTokenizerFast''']

# The modeling classes are only exported when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : Any =[
        '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MvpForCausalLM''',
        '''MvpForConditionalGeneration''',
        '''MvpForQuestionAnswering''',
        '''MvpForSequenceClassification''',
        '''MvpModel''',
        '''MvpPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    # NOTE(review): `_LazyModule` receives `_import_structure`, but the structure
    # dict above was assigned to `A__` — this looks like name mangling; confirm
    # against the upstream transformers file.
    A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
70
'''Binomial coefficient via the space-optimized Pascal's-triangle recurrence.'''


def UpperCamelCase__(n, r):
    """Return C(n, r), the number of ways to choose `r` items out of `n`.

    Builds one row of Pascal's triangle in O(r) space: iterating `j`
    downward lets each entry be updated in place with c[j] += c[j-1].
    Returns 0 when r > n (no such combinations exist).

    :param n: total number of items (non-negative).
    :param r: number of items chosen (non-negative).
    """
    c = [0 for i in range(r + 1)]
    c[0] = 1  # C(i, 0) == 1 for every i
    for i in range(1, n + 1):
        # Only the first min(i, r) entries of row i are non-zero and needed.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(UpperCamelCase__(n=10, r=5))
70
1
'''simple docstring''' # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position A__ : Dict ='''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip A__ : Tuple =concatenate_datasets A__ : Dict =DownloadConfig A__ : int =DownloadManager A__ : Union[str, Any] =DownloadMode A__ : Tuple =DownloadConfig A__ : Optional[Any] =DownloadMode A__ : str =DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
70
'''Arrow-based `datasets` builder that loads pickled pandas DataFrames.

NOTE(review): identifier obfuscation has mangled this file — every local is
`_lowerCAmelCase`, every parameter is `__snake_case`, and both classes are named
`UpperCAmelCase` — so names like `PandasConfig`, `dl_manager`, `data_files`,
`files`, `splits` and `pa_table` are referenced but never bound. Confirm against
the upstream `datasets/packaged_modules/pandas/pandas.py` before relying on it.
'''
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
    # Optional user-supplied schema; when None the schema is inferred from the data.
    _lowercase: Optional[datasets.Features] = None


class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    # Builder-config class used by `datasets` when instantiating this builder.
    _lowercase: Tuple = PandasConfig

    def lowercase__ ( self : Optional[Any] ) -> str:
        """Return the dataset metadata (features only)."""
        return datasets.DatasetInfo(features=self.config.features )

    def lowercase__ ( self : List[str] , __snake_case : Dict ) -> int:
        """Download/resolve the configured data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        # A bare path / list of paths means a single unnamed split -> TRAIN.
        if isinstance(__snake_case , (str, list, tuple) ):
            _lowerCAmelCase = data_files
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        _lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
        return splits

    def lowercase__ ( self : List[Any] , __snake_case : pa.Table ) -> pa.Table:
        """Cast the raw Arrow table to the configured features schema, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema )
        return pa_table

    def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> Any:
        """Yield (index, Arrow table) pairs, one per pickled-DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
            with open(__snake_case , """rb""" ) as f:
                # NOTE(review): `pd.read_pickle` executes arbitrary code on
                # untrusted input — only load trusted files.
                _lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(__snake_case ) )
            yield i, self._cast_table(__snake_case )
70
1
'''Arrow-based `datasets` builder for JSON and JSON-Lines files.

NOTE(review): identifier obfuscation has mangled this file — every local is
`_lowerCAmelCase`, every parameter is `__snake_case`, and both classes are named
`UpperCAmelCase` — so names like `JsonConfig`, `dl_manager`, `data_files`,
`files`, `splits`, `pa_table`, `dataset`, `keys`, `mapping`, `batch`,
`block_size` and `batch_idx` are referenced but never bound. Confirm against
the upstream `datasets/packaged_modules/json/json.py` before relying on it.
'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline

A__ : Union[str, Any] =datasets.utils.logging.get_logger(__name__)


@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
    # Optional user-supplied schema; when None the schema is inferred.
    _lowercase: Optional[datasets.Features] = None
    # Text encoding used to read the files.
    _lowercase: str = "utf-8"
    # Error handler passed to `open(..., errors=...)`.
    _lowercase: Optional[str] = None
    # When set, the records live under this top-level JSON field.
    _lowercase: Optional[str] = None
    _lowercase: bool = True  # deprecated
    _lowercase: Optional[int] = None  # deprecated
    # Bytes read per batch when streaming JSON Lines.
    _lowercase: int = 10 << 20  # 10MB
    _lowercase: Optional[bool] = None


class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    # Builder-config class used by `datasets` when instantiating this builder.
    _lowercase: List[str] = JsonConfig

    def lowercase__ ( self : int ) -> int:
        """Validate deprecated config options and return the dataset metadata."""
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            _lowerCAmelCase = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."""
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    def lowercase__ ( self : List[Any] , __snake_case : Dict ) -> Optional[Any]:
        """Download/resolve the configured data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        # A bare path / list of paths means a single unnamed split -> TRAIN.
        if isinstance(__snake_case , (str, list, tuple) ):
            _lowerCAmelCase = data_files
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        _lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
        return splits

    def lowercase__ ( self : Tuple , __snake_case : pa.Table ) -> pa.Table:
        """Add columns missing from the table, then cast to the configured schema."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                _lowerCAmelCase = self.config.features.arrow_schema.field(__snake_case ).type
                _lowerCAmelCase = pa_table.append_column(__snake_case , pa.array([None] * len(__snake_case ) , type=__snake_case ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema )
        return pa_table

    def lowercase__ ( self : Any , __snake_case : int ) -> List[str]:
        """Yield (key, Arrow table) pairs from each JSON / JSON-Lines file.

        Single-object files with a `field` config are parsed with the stdlib
        `json` module; everything else is streamed in chunks through
        `pyarrow.json.read_json`, doubling the Arrow block size on
        "straddling object" parse failures and falling back to stdlib parsing
        for plain JSON arrays.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    _lowerCAmelCase = json.load(__snake_case )
                # We keep only the field we are interested in
                _lowerCAmelCase = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(__snake_case , (list, tuple) ):
                    _lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                    _lowerCAmelCase = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
                else:
                    _lowerCAmelCase = dataset
                _lowerCAmelCase = pa.Table.from_pydict(__snake_case )
                yield file_idx, self._cast_table(__snake_case )
            # If the file has one json object per line
            else:
                with open(__snake_case , """rb""" ) as f:
                    _lowerCAmelCase = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    _lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
                    _lowerCAmelCase = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        _lowerCAmelCase = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(__snake_case )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            _lowerCAmelCase = batch.decode(self.config.encoding , errors=__snake_case ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    _lowerCAmelCase = paj.read_json(
                                        io.BytesIO(__snake_case ) , read_options=paj.ReadOptions(block_size=__snake_case )
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(__snake_case , pa.ArrowInvalid )
                                        and "straddling" not in str(__snake_case )
                                        or block_size > len(__snake_case )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(__snake_case )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    __snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors
                                ) as f:
                                    _lowerCAmelCase = json.load(__snake_case )
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(__snake_case )}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(__snake_case , __snake_case ):  # list is the only sequence type supported in JSON
                                try:
                                    _lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                                    _lowerCAmelCase = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
                                    _lowerCAmelCase = pa.Table.from_pydict(__snake_case )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(__snake_case )}: {e}" )
                                    raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(__snake_case )
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(__snake_case )}: {e}" )
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(__snake_case )
                        batch_idx += 1
70
'''Prim's minimum-spanning-tree algorithm: list-based and binary-heap variants.

The original file was mangled beyond running: three top-level functions shared
one name, parameters were duplicated (a SyntaxError), and the vertex class
defined two methods under the same name while callers used `add_neighbor` /
`add_edge`. Reconstructed as a working module with the same structure.
'''
import heapq as hq
import math
from collections.abc import Iterator


class UpperCAmelCase:
    """A graph vertex for Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None        # current cheapest edge weight connecting this vertex to the tree
        self.pi = None         # parent vertex in the spanning tree
        self.neighbors = []    # adjacent Vertex objects
        self.edges = {}        # {neighbor vertex id: edge weight}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record `vertex` as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to `vertex` (keyed by its id)."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected weighted edge between 1-indexed vertices `a` and `b`."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Return the MST of `graph` rooted at `root` as a list of (child, parent) 1-indexed pairs.

    O(V^2) variant: repeatedly extracts the cheapest frontier vertex with min().
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            # Relax: adopt u as parent if the edge u-v is cheaper than v's current key.
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Yield MST edges of `graph` rooted at `root` as (child, parent) 1-indexed pairs.

    Binary-heap variant: re-heapifies after each key decrease.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Keys changed, so the heap invariant must be restored.
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
1
'''Project Euler 29: count distinct terms of a**b for 2 <= a, b <= n.'''


def UpperCamelCase__(lowerCAmelCase=1_00):
    """Return the number of distinct values a**b with 2 <= a <= n and 2 <= b <= n.

    A set deduplicates collisions such as 2**4 == 4**2.

    :param lowerCAmelCase: inclusive upper bound n for both base and exponent.
    """
    collect_powers = set()
    upper = lowerCAmelCase + 1  # range() end is exclusive, so include n itself
    for a in range(2, upper):
        for b in range(2, upper):
            collect_powers.add(a**b)  # duplicates are absorbed by the set
    return len(collect_powers)


if __name__ == "__main__":
    print('''Number of terms ''', UpperCamelCase__(int(input().strip())))
70
'''Slow integration test pinning the google/mt5-small loss on a toy example.'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
    @slow
    def lowercase__ ( self : List[Any] ) -> str:
        """Check mt5-small against a precomputed reference score on a tiny input.

        NOTE(review): the `__snake_case` arguments below are mangled
        identifiers (presumably `return_dict=True` and `torch_device`, and the
        `_lowerCAmelCase` locals were `model`, `tokenizer`, `input_ids`,
        `labels`, `loss`, `mtf_score`, `EXPECTED_SCORE`) — confirm against the
        upstream transformers test before relying on this code.
        """
        _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__snake_case ).to(__snake_case )
        _lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        _lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
        _lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
        # Forward pass with labels produces the cross-entropy loss.
        _lowerCAmelCase = model(input_ids.to(__snake_case ) , labels=labels.to(__snake_case ) ).loss
        # Rescaled by label length — presumably to match the original mtf
        # (Mesh TensorFlow) summed-score convention; confirm.
        _lowerCAmelCase = -(labels.shape[-1] * loss.item())
        _lowerCAmelCase = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
70
1
'''Arrow-based `datasets` builder that loads pickled pandas DataFrames.

NOTE(review): identifier obfuscation has mangled this file — every local is
`_lowerCAmelCase`, every parameter is `__snake_case`, and both classes are named
`UpperCAmelCase` — so names like `PandasConfig`, `dl_manager`, `data_files`,
`files`, `splits` and `pa_table` are referenced but never bound. Confirm against
the upstream `datasets/packaged_modules/pandas/pandas.py` before relying on it.
'''
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
    # Optional user-supplied schema; when None the schema is inferred from the data.
    _lowercase: Optional[datasets.Features] = None


class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    # Builder-config class used by `datasets` when instantiating this builder.
    _lowercase: Tuple = PandasConfig

    def lowercase__ ( self : Optional[Any] ) -> str:
        """Return the dataset metadata (features only)."""
        return datasets.DatasetInfo(features=self.config.features )

    def lowercase__ ( self : List[str] , __snake_case : Dict ) -> int:
        """Download/resolve the configured data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        # A bare path / list of paths means a single unnamed split -> TRAIN.
        if isinstance(__snake_case , (str, list, tuple) ):
            _lowerCAmelCase = data_files
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        _lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
        return splits

    def lowercase__ ( self : List[Any] , __snake_case : pa.Table ) -> pa.Table:
        """Cast the raw Arrow table to the configured features schema, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema )
        return pa_table

    def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> Any:
        """Yield (index, Arrow table) pairs, one per pickled-DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
            with open(__snake_case , """rb""" ) as f:
                # NOTE(review): `pd.read_pickle` executes arbitrary code on
                # untrusted input — only load trusted files.
                _lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(__snake_case ) )
            yield i, self._cast_table(__snake_case )
70
'''SentencePiece-based NLLB-200 tokenizer (fairseq-aligned vocabulary, 200+ language codes).

NOTE(review): identifier obfuscation has mangled this file — the base class is
`snake_case_`, every parameter is `__snake_case`, every local/attribute
assignment is `_lowerCAmelCase`, and all methods are `lowercase__` — so the
code references many names (`mask_token`, `sp_model_kwargs`, `vocab_file`,
`src_lang`, `tgt_lang`, `state`, `d`, `token`, `index`, `vocab`, `inputs`,
`out_string`, `out_vocab_file`, ...) that are never bound as written. Confirm
against the upstream `transformers` NLLB tokenizer before relying on it.
'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging

A__ : Any =logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
A__ : List[Any] ='''▁'''

A__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model'''}

A__ : Union[str, Any] ={
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
        ),
    }
}

A__ : Dict ={
    '''facebook/nllb-200-distilled-600M''': 10_24,
}

# FLORES-200 language codes appended to the vocabulary as special language tokens.
# fmt: off
A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']


class UpperCAmelCase ( snake_case_ ):
    # Class-level tokenizer configuration (file names, pretrained maps, model inputs).
    _lowercase: int = VOCAB_FILES_NAMES
    _lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP
    _lowercase: str = ['''input_ids''', '''attention_mask''']
    _lowercase: List[int] = []
    _lowercase: List[int] = []

    def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]:
        """Load the SentencePiece model and build the fairseq-aligned vocabulary maps."""
        # Mask token behave like a normal word, i.e. include the space before it
        _lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
        _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        _lowerCAmelCase = legacy_behaviour
        super().__init__(
            bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , )
        _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__snake_case ) )
        _lowerCAmelCase = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        _lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        _lowerCAmelCase = 1
        _lowerCAmelCase = len(self.sp_model )
        # Language codes are appended after the SentencePiece vocabulary.
        _lowerCAmelCase = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
        }
        _lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
        _lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        _lowerCAmelCase = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        _lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
        _lowerCAmelCase = self.lang_code_to_id[self._src_lang]
        _lowerCAmelCase = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self : List[str] ) -> List[str]:
        """Drop the unpicklable C++ SentencePiece object; keep its serialized proto."""
        _lowerCAmelCase = self.__dict__.copy()
        _lowerCAmelCase = None
        _lowerCAmelCase = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict:
        """Rebuild the SentencePiece processor from the serialized proto."""
        _lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            _lowerCAmelCase = {}
        _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    @property
    def lowercase__ ( self : List[Any] ) -> Any:
        """Total vocabulary size: spm pieces + language codes + fairseq offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def lowercase__ ( self : int ) -> str:
        """Current source-language code."""
        return self._src_lang

    @src_lang.setter
    def lowercase__ ( self : Dict , __snake_case : str ) -> None:
        """Set the source language and refresh the special-token prefix/suffix."""
        _lowerCAmelCase = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
        """Return a 0/1 mask marking special tokens in the encoded sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
        _lowerCAmelCase = [1] * len(self.prefix_tokens )
        _lowerCAmelCase = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones

    def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
        """Wrap token ids with the language-dependent prefix/suffix special tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
        """Return an all-zero token-type-id list (NLLB does not use token types)."""
        _lowerCAmelCase = [self.sep_token_id]
        _lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict:
        """Encode raw text for translation, attaching the target-language token id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        _lowerCAmelCase = src_lang
        _lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
        _lowerCAmelCase = self.convert_tokens_to_ids(__snake_case )
        _lowerCAmelCase = tgt_lang_id
        return inputs

    def lowercase__ ( self : List[Any] ) -> Optional[int]:
        """Return the full token -> id vocabulary, including added tokens."""
        _lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]:
        """Tokenize text into SentencePiece subword strings."""
        return self.sp_model.encode(__snake_case , out_type=__snake_case )

    def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple:
        """Convert a token string to its fairseq-aligned id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _lowerCAmelCase = self.sp_model.PieceToId(__snake_case )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
        """Convert a fairseq-aligned id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str:
        """Join subword tokens, replacing the SentencePiece '▁' marker with spaces."""
        _lowerCAmelCase = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip()
        return out_string

    def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
        """Write the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(__snake_case ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        _lowerCAmelCase = os.path.join(
            __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __snake_case )
        elif not os.path.isfile(self.vocab_file ):
            with open(__snake_case , """wb""" ) as fi:
                _lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(__snake_case )
        return (out_vocab_file,)

    def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : Optional[int] , ) -> BatchEncoding:
        """Batch-encode source/target texts with the given language pair."""
        _lowerCAmelCase = src_lang
        _lowerCAmelCase = tgt_lang
        return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )

    def lowercase__ ( self : str ) -> Tuple:
        """Switch special tokens to source-language mode (used by the base class)."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def lowercase__ ( self : Dict ) -> Optional[Any]:
        """Switch special tokens to target-language mode (used by the base class)."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def lowercase__ ( self : str , __snake_case : int ) -> None:
        """Set prefix/suffix special tokens for encoding in the source language.

        Legacy behaviour: no prefix and suffix=[eos, src_lang_code];
        new behaviour: prefix=[src_lang_code] and suffix=[eos].
        """
        _lowerCAmelCase = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            _lowerCAmelCase = []
            _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
        else:
            _lowerCAmelCase = [self.cur_lang_code]
            _lowerCAmelCase = [self.eos_token_id]

    def lowercase__ ( self : Any , __snake_case : str ) -> None:
        """Set prefix/suffix special tokens for encoding in the target language.

        Legacy behaviour: no prefix and suffix=[eos, tgt_lang_code];
        new behaviour: prefix=[tgt_lang_code] and suffix=[eos].
        """
        _lowerCAmelCase = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            _lowerCAmelCase = []
            _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
        else:
            _lowerCAmelCase = [self.cur_lang_code]
            _lowerCAmelCase = [self.eos_token_id]
70
1
# NOTE(review): obfuscated BART model configuration (`model_type = "bart"`) plus its
# ONNX export config: dummy-input generators for the default/seq2seq-lm, causal-lm and
# sequence-classification tasks, including past-key-values tensor shapes and the
# flattening of past key/values for export. Every local is bound to the single
# throwaway name `_lowerCAmelCase`, so later reads (e.g. `vocab_size` in `__init__`,
# `common_inputs` in the dummy-input generators) reference names that are never
# actually bound — the chunk is not runnable as written. Left byte-identical rather
# than guessing the intended variable wiring blind.
'''simple docstring''' import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A__ : Tuple =logging.get_logger(__name__) A__ : List[Any] ={ '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class UpperCAmelCase ( snake_case_ ): _lowercase: Any = '''bart''' _lowercase: Union[str, Any] = ['''past_key_values'''] _lowercase: str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : List[str]=10_24 , __snake_case : Union[str, Any]=12 , __snake_case : Optional[Any]=40_96 , __snake_case : Tuple=16 , __snake_case : Optional[Any]=12 , __snake_case : List[Any]=40_96 , __snake_case : Dict=16 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Union[str, Any]="gelu" , __snake_case : str=10_24 , __snake_case : int=0.1 , __snake_case : int=0.0 , __snake_case : List[str]=0.0 , __snake_case : Union[str, Any]=0.02 , __snake_case : List[Any]=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Any=3 , __snake_case : List[Any]=1 , __snake_case : List[str]=0 , __snake_case : Optional[Any]=2 , __snake_case : int=True , __snake_case : List[str]=2 , __snake_case : Optional[int]=2 , **__snake_case : Union[str, Any] , ) -> Optional[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim 
_lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = classifier_dropout _lowerCAmelCase = use_cache _lowerCAmelCase = encoder_layers _lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ): _lowerCAmelCase = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " """The config can simply be saved and uploaded again to be fixed.""" ) class UpperCAmelCase ( snake_case_ ): @property def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _lowerCAmelCase = {0: """batch"""} _lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""} _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(__snake_case ): _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""} _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""} else: _lowerCAmelCase = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super().outputs else: _lowerCAmelCase = super(__snake_case , self ).outputs if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(__snake_case ): _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""} _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowercase__ ( self : Optional[int] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Generate decoder inputs _lowerCAmelCase = seq_length if not self.use_past else 1 _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) _lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} 
_lowerCAmelCase = dict(**__snake_case , **__snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape _lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1] _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = decoder_seq_length + 3 _lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 ) _lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase = min(__snake_case , __snake_case ) _lowerCAmelCase = max(__snake_case , __snake_case ) - min_num_layers _lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(__snake_case ): common_inputs["past_key_values"].append( ( torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), ) ) # TODO: test this. 
_lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(__snake_case , __snake_case ): common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) ) return common_inputs def lowercase__ ( self : Tuple , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _lowerCAmelCase = seqlen + 2 _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = common_inputs["""attention_mask"""].dtype _lowerCAmelCase = torch.cat( [common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 ) _lowerCAmelCase = [ (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case ) ] return common_inputs def lowercase__ ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = tokenizer.num_special_tokens_to_add(__snake_case ) _lowerCAmelCase = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase = dict(tokenizer(__snake_case , return_tensors=__snake_case ) ) return common_inputs def lowercase__ ( self : Dict , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) elif self.task == "causal-lm": _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) else: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) return common_inputs def lowercase__ ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : int ) -> List[Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super()._flatten_past_key_values_(__snake_case , __snake_case , 
__snake_case , __snake_case ) else: _lowerCAmelCase = super(__snake_case , self )._flatten_past_key_values_( __snake_case , __snake_case , __snake_case , __snake_case )
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = len(lowerCAmelCase ) for i in range(length - 1 ): _lowerCAmelCase = i for k in range(i + 1 , lowerCAmelCase ): if collection[k] < collection[least]: _lowerCAmelCase = k if least != i: _lowerCAmelCase , _lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": A__ : str =input('''Enter numbers separated by a comma:\n''').strip() A__ : Optional[int] =[int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
70
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# sentencepiece-backed tokenizer, with a dummy fallback object that raises a
# helpful ImportError when sentencepiece is missing.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

# Bug fix: this alias was previously assigned to a throwaway variable, leaving
# the name `MTaTokenizer` — used in `extra_objects` below — undefined, so the
# module raised NameError at import time. (The broken `A__ : Union[...]`-style
# annotations also referenced `Union`/`Optional`/`Any` without importing them;
# module-level annotations are evaluated, so they are dropped here.)
MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

# Bug fix: same as above, for the fast tokenizer alias.
MTaTokenizerFast = TaTokenizerFast

# Bug fix: the lazy-import table must be bound to `_import_structure` (it is
# referenced by the `_LazyModule` call at the bottom); the per-backend symbol
# lists are keyed into this dict instead of each rebinding one throwaway name.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Bug fix: the lazy module must replace this module in `sys.modules`;
    # previously the `_LazyModule` instance was bound to a throwaway variable
    # (and the dangling `import sys` was never used), so lazy loading never
    # took effect.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
        module_spec=__spec__,
    )
70
# NOTE(review): obfuscated video image processor (shortest-edge resize, center crop,
# rescale with an optional `offset` that shifts pixel values toward a symmetric range,
# and normalize), applied frame-by-frame over batched videos via `make_batched`.
# `if do_resize and size is None or resample is None:` parses as
# `(do_resize and size is None) or (resample is None)` — the error can fire even when
# `do_resize` is False; this matches the upstream quirk, confirm before changing.
# All locals are rebound to the single throwaway name `_lowerCAmelCase`, so later
# reads reference never-bound names and the chunk is not runnable as written; the
# code is left byte-identical rather than guessing the intended bindings.
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL A__ : List[str] =logging.get_logger(__name__) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class UpperCAmelCase ( snake_case_ ): _lowercase: Any = ['''pixel_values'''] def __init__( self : Tuple , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : str , ) -> None: super().__init__(**__snake_case ) _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_56} _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case ) _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 
2_24} _lowerCAmelCase = get_size_dict(__snake_case , param_name="""crop_size""" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = resample _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = offset _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case ) if "shortest_edge" in size: _lowerCAmelCase = get_resize_output_image_size(__snake_case , size["""shortest_edge"""] , default_to_square=__snake_case ) elif "height" in size and "width" in size: _lowerCAmelCase = (size["""height"""], size["""width"""]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : bool = True , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> Dict: _lowerCAmelCase = image.astype(np.floataa ) if offset: _lowerCAmelCase = image - (scale / 2) return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ) -> np.ndarray: return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : List[Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std 
must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. _lowerCAmelCase = to_numpy_array(__snake_case ) if do_resize: _lowerCAmelCase = self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) if do_center_crop: _lowerCAmelCase = self.center_crop(__snake_case , size=__snake_case ) if do_rescale: _lowerCAmelCase = self.rescale(image=__snake_case , scale=__snake_case , offset=__snake_case ) if do_normalize: _lowerCAmelCase = self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) _lowerCAmelCase = to_channel_dimension_format(__snake_case , __snake_case ) return image def lowercase__ ( self : List[Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ) -> PIL.Image.Image: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = offset if offset is not None else self.offset _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else 
self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(__snake_case , param_name="""crop_size""" ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) _lowerCAmelCase = make_batched(__snake_case ) _lowerCAmelCase = [ [ self._preprocess_image( image=__snake_case , do_resize=__snake_case , size=__snake_case , resample=__snake_case , do_center_crop=__snake_case , crop_size=__snake_case , do_rescale=__snake_case , rescale_factor=__snake_case , offset=__snake_case , do_normalize=__snake_case , image_mean=__snake_case , image_std=__snake_case , data_format=__snake_case , ) for img in video ] for video in videos ] _lowerCAmelCase = {"""pixel_values""": videos} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
70
1
# NOTE(review): obfuscated Swin Transformer configuration (`model_type = "swin"`,
# BackboneConfigMixin for out_features/out_indices alignment) plus an ONNX config
# exposing the `pixel_values` dynamic axes and a 1e-4 validation tolerance. Left
# byte-identical: every local in `__init__` is rebound to the throwaway name
# `_lowerCAmelCase` (later reads such as `embed_dim` reference never-bound names),
# so a behavior-preserving rewrite cannot be done blind.
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ : Dict =logging.get_logger(__name__) A__ : Tuple ={ '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCAmelCase ( snake_case_ , snake_case_ ): _lowercase: Optional[Any] = '''swin''' _lowercase: Union[str, Any] = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[Any] , __snake_case : Dict=2_24 , __snake_case : List[str]=4 , __snake_case : Optional[Any]=3 , __snake_case : Optional[int]=96 , __snake_case : Tuple=[2, 2, 6, 2] , __snake_case : Optional[int]=[3, 6, 12, 24] , __snake_case : Optional[Any]=7 , __snake_case : List[Any]=4.0 , __snake_case : List[Any]=True , __snake_case : Tuple=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict="gelu" , __snake_case : int=False , __snake_case : Optional[Any]=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : str=32 , __snake_case : Any=None , __snake_case : Tuple=None , **__snake_case : Tuple , ) -> int: super().__init__(**__snake_case ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(__snake_case ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings 
_lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(__snake_case ) - 1) ) _lowerCAmelCase = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__snake_case ) + 1 )] _lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices( out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names ) class UpperCAmelCase ( snake_case_ ): _lowercase: Optional[Any] = version.parse('''1.11''' ) @property def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase__ ( self : Union[str, Any] ) -> float: return 1E-4
70
# NOTE(review): obfuscated Donut-style processor pairing an AutoImageProcessor with an
# AutoTokenizer: `__call__` dispatches images/text, plus a `<s_key>...</s_key>`
# token-to-JSON parser and deprecated `as_target_processor` / `feature_extractor`
# compatibility shims. In the parser, the `re.search` result for the end token and the
# subsequent `.group()` values are all assigned to the same throwaway name
# `_lowerCAmelCase`, so the `if end_token is None` branch as written cannot behave as
# the upstream code did — left byte-identical rather than guessing the intended
# variable wiring.
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase ( snake_case_ ): _lowercase: Union[str, Any] = ['''image_processor''', '''tokenizer'''] _lowercase: int = '''AutoImageProcessor''' _lowercase: Optional[int] = '''AutoTokenizer''' def __init__( self : int , __snake_case : Tuple=None , __snake_case : Optional[int]=None , **__snake_case : Tuple ) -> List[Any]: _lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __snake_case , ) _lowerCAmelCase = kwargs.pop("""feature_extractor""" ) _lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__snake_case , __snake_case ) _lowerCAmelCase = self.image_processor _lowerCAmelCase = False def __call__( self : Dict , *__snake_case : Optional[int] , **__snake_case : Union[str, Any] ) -> Tuple: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__snake_case , **__snake_case ) _lowerCAmelCase = kwargs.pop("""images""" , __snake_case ) _lowerCAmelCase = kwargs.pop("""text""" , __snake_case ) if len(__snake_case ) > 0: _lowerCAmelCase = args[0] _lowerCAmelCase = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _lowerCAmelCase = self.image_processor(__snake_case , *__snake_case , **__snake_case ) if text is not None: _lowerCAmelCase = self.tokenizer(__snake_case , **__snake_case ) if text is None: return inputs elif images is None: return encodings else: _lowerCAmelCase = encodings["""input_ids"""] return inputs def 
lowercase__ ( self : List[Any] , *__snake_case : Dict , **__snake_case : List[str] ) -> int: return self.tokenizer.batch_decode(*__snake_case , **__snake_case ) def lowercase__ ( self : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ) -> Any: return self.tokenizer.decode(*__snake_case , **__snake_case ) @contextmanager def lowercase__ ( self : int ) -> Optional[Any]: warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) _lowerCAmelCase = True _lowerCAmelCase = self.tokenizer yield _lowerCAmelCase = self.image_processor _lowerCAmelCase = False def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any]=False , __snake_case : Dict=None ) -> Tuple: if added_vocab is None: _lowerCAmelCase = self.tokenizer.get_added_vocab() _lowerCAmelCase = {} while tokens: _lowerCAmelCase = re.search(R"""<s_(.*?)>""" , __snake_case , re.IGNORECASE ) if start_token is None: break _lowerCAmelCase = start_token.group(1 ) _lowerCAmelCase = re.search(Rf"</s_{key}>" , __snake_case , re.IGNORECASE ) _lowerCAmelCase = start_token.group() if end_token is None: _lowerCAmelCase = tokens.replace(__snake_case , """""" ) else: _lowerCAmelCase = end_token.group() _lowerCAmelCase = re.escape(__snake_case ) _lowerCAmelCase = re.escape(__snake_case ) _lowerCAmelCase = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , __snake_case , re.IGNORECASE ) if content is not None: _lowerCAmelCase = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _lowerCAmelCase = self.tokenajson(__snake_case , is_inner_value=__snake_case , added_vocab=__snake_case ) if value: if len(__snake_case ) == 1: _lowerCAmelCase = value[0] _lowerCAmelCase = value else: # leaf nodes _lowerCAmelCase = [] for leaf in 
content.split(R"""<sep/>""" ): _lowerCAmelCase = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _lowerCAmelCase = leaf[1:-2] # for categorical special tokens output[key].append(__snake_case ) if len(output[key] ) == 1: _lowerCAmelCase = output[key][0] _lowerCAmelCase = tokens[tokens.find(__snake_case ) + len(__snake_case ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__snake_case , added_vocab=__snake_case ) if len(__snake_case ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , ) return self.image_processor_class @property def lowercase__ ( self : List[Any] ) -> Any: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , ) return self.image_processor
70
1
"""Tests for the ``text-question-answering`` agent tool."""

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Passage fed to the tool in every test below; the expected answer span is
# "launched the BigScience Research Workshop".
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    # NOTE: in the original (obfuscated) file every method was named
    # ``lowercase__``, so later definitions shadowed earlier ones and setUp
    # never ran. Methods are restored to unittest conventions here.

    def setUp(self):
        """Instantiate both the local and the remote variant of the tool."""
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
70
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _lowerCAmelCase = [] for num in range(len(lowerCAmelCase ) ): _lowerCAmelCase = 0 while 2 * i * i <= odd_composites[num]: _lowerCAmelCase = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase ) == n: return list_nums return [] def UpperCamelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
70
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor A__ : Dict =logging.get_logger(__name__) class UpperCAmelCase ( snake_case_ ): def __init__( self : Optional[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ) -> None: warnings.warn( """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DonutImageProcessor instead.""" , __snake_case , ) super().__init__(*__snake_case , **__snake_case )
70
'''simple docstring''' import argparse import json from tqdm import tqdm def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--src_path""" , type=lowerCAmelCase , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , ) parser.add_argument( """--evaluation_set""" , type=lowerCAmelCase , help="""where to store parsed evaluation_set file""" , ) parser.add_argument( """--gold_data_path""" , type=lowerCAmelCase , help="""where to store parsed gold_data_path file""" , ) _lowerCAmelCase = parser.parse_args() with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open( args.gold_data_path , """w""" ) as gold_file: _lowerCAmelCase = json.load(lowerCAmelCase ) for dpr_record in tqdm(lowerCAmelCase ): _lowerCAmelCase = dpr_record["""question"""] _lowerCAmelCase = [context["""title"""] for context in dpr_record["""positive_ctxs"""]] eval_file.write(question + """\n""" ) gold_file.write("""\t""".join(lowerCAmelCase ) + """\n""" ) if __name__ == "__main__": main()
70
1
"""Tests for ClapProcessor (tokenizer + feature-extractor composition)."""

import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    # NOTE: in the original (obfuscated) file every method was named
    # ``lowercase__`` so they shadowed each other and setUp never assigned
    # ``self.checkpoint`` / ``self.tmpdirname``; names are restored here.

    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Round-trip save/load must preserve vocab and feature-extractor config."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained must override saved settings."""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        """Processor audio path must match the bare feature extractor."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor text path must match the bare tokenizer."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
70
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] ={ '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Union[str, Any] =[ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys A__ : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
"""Dataset and preprocessing utilities for CNN/DailyMail summarization."""

import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail "stories" dataset: one text file per article."""

    def __init__(self, path="", prefix="train"):
        """Collect the paths of the story files under *path* (summary files excluded)."""
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Return the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        """Return (document_name, story_lines, summary_lines) for document *idx*."""
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story file into (story_lines, summary_lines).

    Summary lines are the lines following "@highlight" markers; when no
    marker is present the whole text is the story and the summary is empty.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines (everything left, minus the "@highlight" markers)
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    """Append a period to *line* unless it already ends with an end token."""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad *sequence* (a list of token ids) to exactly *block_size*."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return an attention mask tensor: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the lines with *tokenizer* and flatten into two flat id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating 0/1 segment ids per sequence; the segment flips at each separator."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
70
"""Community pipeline: tiled Stable Diffusion x4 upscaling.

Upscales arbitrarily large images by running the SD upscaler on overlapping
tiles and blending them back with linear-ramp transparency masks.
"""

import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build an L-mode alpha mask (255 inside, linear ramp to 0 over the overlap).

    Borders listed in *remove_borders* ("l"/"r"/"t"/"b") get no ramp so tiles
    at the image edge paste fully opaque on that side.
    """
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp *n* into [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    """Clamp an (x0, y0, x1, y1) rect into the box given by *min*/*max* corners."""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow *rect* by *overlap* pixels on every side, clamped to the image bounds."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a slice of the downscaled original to the tile as extra context."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Crop away the (upscaled 4x) context slice added by squeeze_tile."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Return the largest multiple of *d* that is <= *n*."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """StableDiffusionUpscalePipeline that processes large images tile by tile."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale tile (x, y) and alpha-blend it into *final_image* in place."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position (in tile coordinates) of the context slice.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale *image* 4x tile by tile; returns the stitched PIL image."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Example driver: upscale the diffusers logo on GPU with progress callbacks."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
70
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration object for bitsandbytes 8-bit / 4-bit quantization."""

import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union

from packaging import version

from ..utils import is_torch_available, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """Wraps all arguments controlling `load_in_8bit` / `load_in_4bit` model loading."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        # Accept a torch.dtype, its string name, or default to float32.
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Validate argument types and the installed bitsandbytes version."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True when either quantization mode is requested."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return the active method: "llm_int8", "fp4", "nf4", or None."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Build a config from *config_dict*, overriding matching attributes from kwargs."""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """Serialize this configuration to *json_file_path* as pretty-printed JSON."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        """Return a serializable dict; the compute dtype is stored by name (e.g. "float32")."""
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        """JSON string of the config; only non-default values when *use_diff* is True."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        """Return only the attributes that differ from a default BitsAndBytesConfig."""
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
70
"""Fast and slow tests for the Kandinsky 2.2 image-to-image pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet so the fast test runs on CPU in seconds."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNet2DConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
70
1
"""Build and save a super-tiny FSMT en-ru model + tokenizer for use in tests."""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

# NOTE(review): the obfuscated original assigned every value to a throwaway `A__` name while the
# rest of the script referenced `vocab`, `merges`, `tokenizer`, etc. — a NameError on every use.
# Restored coherent variable names throughout.
mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
70
"""Tests for the PyTorch SegFormer model."""
# NOTE(review): the obfuscated original named all four classes ``UpperCAmelCase`` and every test
# method ``lowercase__`` (so later definitions shadowed earlier ones and only one test per class
# survived), used duplicate ``__snake_case`` parameter names (a SyntaxError), and discarded
# ``self.model_tester`` / ``config.num_labels`` assignments into throwaway locals.  Names are
# restored from the internal references (``SegformerModelTester(self)``, ``prepare_img()``, ...)
# and the visible imports (``ConfigTester``, ``ModelTesterMixin``, ``PipelineTesterMixin``).
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # SegFormer configs must expose these architecture-defining attributes.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    """Builds tiny SegFormer configs/inputs and runs shape checks for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # per-pixel labels for semantic segmentation
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # final feature map is downsampled by the last stage's rate times 2
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        # single-label (binary) segmentation path uses a different loss
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            # one attention tensor per transformer layer, summed across the blocks
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # base models (MODEL_MAPPING) do not return a loss
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
70
1
"""Factory for common activation-function modules used by diffusion models."""
from torch import nn


def UpperCamelCase__(act_fn):
    """Return a ``torch.nn`` activation module for the given name.

    Args:
        act_fn: One of ``"swish"``, ``"silu"`` (both map to ``nn.SiLU``),
            ``"mish"`` or ``"gelu"``.

    Returns:
        The corresponding instantiated ``nn.Module``.

    Raises:
        ValueError: If ``act_fn`` is not a supported activation name.
    """
    # Fix: the parameter was mangled to a different name than the one the body
    # reads (`act_fn`), so every call raised NameError.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
70
"""Translation feature types for `datasets`-style dataset schemas."""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


# NOTE(review): both classes were mangled to the same name ``UpperCAmelCase`` (the second shadows
# the first at module level) and all fields to ``_lowercase`` while method bodies read
# ``self.languages`` — field/local names are restored so the classes actually function; the class
# names themselves are kept to preserve the external interface.
@dataclass
class UpperCAmelCase:
    """Fixed-language translation feature: one string per language.

    Arrow representation: a struct with one string field per sorted language code.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dict of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class UpperCAmelCase:  # noqa: F811 — name kept from the original; shadows the class above
    """Variable-language translation feature: zero or more translations per language.

    Arrow representation: a struct of two parallel string lists, ``language`` and
    ``translation``.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Fix: this hook must be named __post_init__ to run under @dataclass; the mangled
        # version also discarded both results into throwaway locals.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate and normalize one example into parallel language/translation lists."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into sequences of string Values."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
70
1
"""Split a byte payload into contiguous 1-indexed partition ranges."""
from __future__ import annotations


def UpperCamelCase__(number_of_bytes: int, partitions: int) -> list[str]:
    """Return ``partitions`` contiguous byte ranges covering ``number_of_bytes``.

    Each range is formatted ``"start-end"`` with 1-indexed, inclusive bounds; the
    last partition absorbs any remainder so the ranges always end at
    ``number_of_bytes``.

    Args:
        number_of_bytes: Total number of bytes to split.
        partitions: Number of ranges to produce (must be ``1 <= partitions <= number_of_bytes``).

    Raises:
        ValueError: If ``partitions`` is non-positive or exceeds ``number_of_bytes``.
    """
    # Fix: the mangled signature declared the same parameter name twice (a
    # SyntaxError) while the body read `number_of_bytes`/`partitions`.
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # the final partition takes all remaining bytes
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
"""Fast (tokenizers-backed) tokenizer for RoFormer, with a custom Jieba pre-tokenizer."""
# NOTE(review): the obfuscated original assigned the module constants to throwaway ``A__`` names
# while the class referenced ``VOCAB_FILES_NAMES`` etc., derived from an undefined ``snake_case_``
# base instead of the imported ``PreTrainedTokenizerFast``, declared duplicate parameter names
# (a SyntaxError), and named every method ``lowercase__`` so the base-class overrides
# (``save_vocabulary``, ``save_pretrained``, ...) never took effect.  Names restored accordingly;
# the class name itself is kept to preserve the external interface.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer if the serialized one disagrees with the
        # requested lowercasing / accent-stripping options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the custom Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build token-type ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap in a serializable pre-tokenizer before the base class writes files.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
70
1
"""Lazy-import module definition for the CLIPSeg model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: the mangled original assigned this dict (and the torch-only list) to throwaway
# ``A__`` names while ``_LazyModule`` referenced an undefined ``_import_structure``,
# and never installed the lazy module into ``sys.modules``.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes are only importable when torch is present
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so submodules import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
"""Fast and slow tests for the Stable Diffusion ControlNet image-to-image pipeline."""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """CPU-sized fast tests for a single-ControlNet img2img pipeline."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    # height/width are derived from the input image for img2img, so drop them.
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny, deterministic model components for the pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs (prompt, image, control image) for the pipeline."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    """Fast tests for the img2img pipeline driven by two ControlNets at once."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny components with a MultiControlNetModel wrapping two ControlNets."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the zero-initialized controlnet output convs non-trivial weights so
            # the two controlnets actually influence the result differently.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """GPU integration tests against the real pretrained checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
70
1
'''simple docstring''' import operator as op def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = lambda lowerCAmelCase , lowerCAmelCase : int(x / y ) # noqa: E731 integer division operation _lowerCAmelCase = { """^""": op.pow, """*""": op.mul, """/""": div, """+""": op.add, """-""": op.sub, } # operators & their respective operation # print table header print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ ) print("""-""" * (30 + len(lowerCAmelCase )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(lowerCAmelCase ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(lowerCAmelCase ) , sep=""" | """ ) else: _lowerCAmelCase = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(lowerCAmelCase ) , sep=""" | """ ) _lowerCAmelCase = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(lowerCAmelCase ) , sep=""" | """ ) stack.append( str(opr[x](int(lowerCAmelCase ) , int(lowerCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(lowerCAmelCase ) , sep=""" | """ , ) return int(stack[0] ) if __name__ == "__main__": A__ : Optional[Any] =input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''') print('''\n\tResult = ''', solve(Postfix))
70
"""Convert original SwiftFormer checkpoints to the Hugging Face format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits of the original model for `swiftformer_name`."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map every original state-dict key to its Hugging Face counterpart.

    Returns a list of ``(old_key, new_key)`` pairs.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # "network.<stage>.<block>...." -> "...network.<stage>.blocks.<block>...."
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Load an original SwiftFormer checkpoint, convert it, verify the logits and save it.

    Args:
        swiftformer_name: one of swiftformer_{xs,s,l1,l3}; selects the architecture.
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: local path or https URL of the original checkpoint.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
70
1
'''simple docstring''' from collections.abc import Sequence def UpperCamelCase__ ( lowerCAmelCase = None ): """simple docstring""" if nums is None or not nums: raise ValueError("""Input sequence should not be empty""" ) _lowerCAmelCase = nums[0] for i in range(1 , len(lowerCAmelCase ) ): _lowerCAmelCase = nums[i] _lowerCAmelCase = max(lowerCAmelCase , ans + num , lowerCAmelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user A__ : Optional[Any] =int(input('''Enter number of elements : ''').strip()) A__ : Any =list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
70
"""Integration tests for the `datasets` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# All tests in this module hit the Hub and are marked as integration tests.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset must copy the loading script into the target directory."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric must copy the metric script into the target directory."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Omitting the config name for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Omitting the config name for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
70
1
"""Adjacency-list graph implementation backed by a plain Python dictionary."""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class UpperCAmelCase(Generic[T]):
    """Directed or undirected graph stored as ``{vertex: [adjacent vertices]}``.

    Vertices may be any hashable type ``T``.  Parallel edges are allowed
    (adding the same edge twice appends a duplicate entry).
    """

    def __init__(self, directed: bool = True) -> None:
        # dictionary of lists: vertex -> list of adjacent vertices
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def lowercase__(self, source_vertex: T, destination_vertex: T) -> UpperCAmelCase[T]:
        """Add an edge between the two vertices, creating missing vertices.

        For undirected graphs the edge is recorded in both adjacency lists;
        for directed graphs only ``source -> destination`` is recorded, but the
        destination vertex still gets an (empty) entry.  Returns ``self`` so
        calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices, then create a new
            # vertex with destination vertex as key and assign a list containing the
            # source vertex as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source
            # vertex to destination vertex list of adjacent vertices, then create a
            # new vertex with source vertex as key and assign a list containing the
            # destination vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in
            # adjacency list, create entries for both, each listing the other as its
            # first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new
            # vertex with destination vertex as key, which has no adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing the
            # destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create the source entry pointing at the
            # destination, and the destination entry with no adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
70
'''simple docstring''' from torch import nn def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f"Unsupported activation function: {act_fn}" )
70
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available A__ : str ={} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : List[Any] =['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys A__ : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
"""Top-level package init for `datasets`: version/dependency checks and re-exports."""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


# Fail fast with a clear message when the interpreter or pyarrow is too old.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

# Keep the package namespace clean: these were only needed for the checks above.
del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Backward-compatibility aliases kept on the deprecated module objects so that
# old import paths keep working.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
70
1
"""Fast (tiny-model, CPU) and slow (GPU) tests for ``CycleDiffusionPipeline``."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


# NOTE(review): the base-class names `snake_case_`, the repeated `_lowercase`
# class attributes, the repeated `lowercase__` method names, the undefined
# `__snake_case` references and the locals all bound to `_lowerCAmelCase`
# look like artifacts of automated renaming — later definitions shadow
# earlier ones. Confirm against the upstream diffusers test file.
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    # Pipeline class under test plus parameter sets consumed by the common
    # pipeline-test mixins.
    _lowercase: int = CycleDiffusionPipeline
    _lowercase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    _lowercase: List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    _lowercase: Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
    _lowercase: str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _lowercase: Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def lowercase__ ( self : Union[str, Any] ) -> str:
        """Build the tiny UNet / DDIM scheduler / VAE / CLIP components used by the fast tests."""
        torch.manual_seed(0 )
        _lowerCAmelCase = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            cross_attention_dim=32 ,
        )
        _lowerCAmelCase = DDIMScheduler(
            beta_start=0.0_00_85 ,
            beta_end=0.0_12 ,
            beta_schedule="""scaled_linear""" ,
            num_train_timesteps=10_00 ,
            clip_sample=__snake_case ,
            set_alpha_to_one=__snake_case ,
        )
        torch.manual_seed(0 )
        _lowerCAmelCase = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0 )
        _lowerCAmelCase = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=10_00 ,
        )
        _lowerCAmelCase = CLIPTextModel(__snake_case )
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        _lowerCAmelCase = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def lowercase__ ( self : List[Any] , __snake_case : int , __snake_case : str=0 ) -> int:
        """Build a deterministic input dict (seeded image + generator) for the pipeline."""
        _lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
        _lowerCAmelCase = image / 2 + 0.5
        # MPS does not support device-bound generators — fall back to global seeding.
        if str(__snake_case ).startswith("""mps""" ):
            _lowerCAmelCase = torch.manual_seed(__snake_case )
        else:
            _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
        _lowerCAmelCase = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def lowercase__ ( self : Any ) -> List[str]:
        """Run the pipeline on CPU and compare a 3x3 corner slice to reference values."""
        _lowerCAmelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = CycleDiffusionPipeline(**__snake_case )
        _lowerCAmelCase = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        _lowerCAmelCase = self.get_dummy_inputs(__snake_case )
        _lowerCAmelCase = pipe(**__snake_case )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        _lowerCAmelCase = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def lowercase__ ( self : Any ) -> str:
        """Same pipeline check as above but with all components cast to fp16 on GPU."""
        _lowerCAmelCase = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(__snake_case , """half""" ):
                _lowerCAmelCase = module.half()
        _lowerCAmelCase = CycleDiffusionPipeline(**__snake_case )
        _lowerCAmelCase = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        _lowerCAmelCase = self.get_dummy_inputs(__snake_case )
        _lowerCAmelCase = pipe(**__snake_case )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        _lowerCAmelCase = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    # The following overrides only gate the inherited mixin tests on platforms
    # where they are known to misbehave (MPS / non-determinism).
    @skip_mps
    def lowercase__ ( self : int ) -> Dict:
        return super().test_save_load_local()

    @unittest.skip("""non-deterministic pipeline""" )
    def lowercase__ ( self : List[str] ) -> Optional[int]:
        return super().test_inference_batch_single_identical()

    @skip_mps
    def lowercase__ ( self : Any ) -> Any:
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def lowercase__ ( self : str ) -> int:
        return super().test_save_load_optional_components()

    @skip_mps
    def lowercase__ ( self : Optional[Any] ) -> str:
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    def lowercase__ ( self : int ) -> Optional[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self : Tuple ) -> Any:
        """Full fp16 run against the real SD v1.4 checkpoint, compared to a stored reference image."""
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        _lowerCAmelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        _lowerCAmelCase = init_image.resize((5_12, 5_12) )
        _lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
        _lowerCAmelCase = DDIMScheduler.from_pretrained(__snake_case , subfolder="""scheduler""" )
        _lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(
            __snake_case , scheduler=__snake_case , safety_checker=__snake_case , torch_dtype=torch.floataa , revision="""fp16""" )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        pipe.enable_attention_slicing()
        _lowerCAmelCase = """A black colored car"""
        _lowerCAmelCase = """A blue colored car"""
        _lowerCAmelCase = torch.manual_seed(0 )
        _lowerCAmelCase = pipe(
            prompt=__snake_case ,
            source_prompt=__snake_case ,
            image=__snake_case ,
            num_inference_steps=1_00 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=__snake_case ,
            output_type="""np""" ,
        )
        _lowerCAmelCase = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def lowercase__ ( self : Any ) -> str:
        """Same end-to-end run in full precision with a tighter tolerance."""
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        _lowerCAmelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        _lowerCAmelCase = init_image.resize((5_12, 5_12) )
        _lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
        _lowerCAmelCase = DDIMScheduler.from_pretrained(__snake_case , subfolder="""scheduler""" )
        _lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(__snake_case , scheduler=__snake_case , safety_checker=__snake_case )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        pipe.enable_attention_slicing()
        _lowerCAmelCase = """A black colored car"""
        _lowerCAmelCase = """A blue colored car"""
        _lowerCAmelCase = torch.manual_seed(0 )
        _lowerCAmelCase = pipe(
            prompt=__snake_case ,
            source_prompt=__snake_case ,
            image=__snake_case ,
            num_inference_steps=1_00 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=__snake_case ,
            output_type="""np""" ,
        )
        _lowerCAmelCase = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
70
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : Tuple ={ '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int =['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any =[ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
"""Feature types for translation datasets: fixed-language and
variable-language translation dictionaries backed by Arrow structs."""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


# NOTE(review): all fields being named `_lowercase` and the default
# `init=snake_case_ , repr=snake_case_` referencing an undefined name look
# like automated-renaming artifacts — later field declarations shadow earlier
# ones. Confirm against the upstream `datasets` Translation feature.
@dataclass
class UpperCAmelCase :
    # languages list; optional id; then ClassVar bookkeeping (dtype, pa_type)
    # and the feature-type discriminator string.
    _lowercase: List[str]
    _lowercase: Optional[str] = None
    # Automatically constructed
    _lowercase: ClassVar[str] = "dict"
    _lowercase: ClassVar[Any] = None
    _lowercase: str = field(default='''Translation''' , init=snake_case_ , repr=snake_case_ )

    def __call__( self : Optional[int] ) -> Optional[int]:
        """Return the Arrow struct type: one string field per sorted language code."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def lowercase__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into one string Value feature per sorted language code."""
        from .features import Value

        return {k: Value("""string""" ) for k in sorted(self.languages )}


@dataclass
class UpperCAmelCase :
    # Optional language whitelist plus derived num_languages; same ClassVar
    # bookkeeping pattern as above.
    _lowercase: Optional[List] = None
    _lowercase: Optional[int] = None
    _lowercase: Optional[str] = None
    # Automatically constructed
    _lowercase: ClassVar[str] = "dict"
    _lowercase: ClassVar[Any] = None
    _lowercase: str = field(default='''TranslationVariableLanguages''' , init=snake_case_ , repr=snake_case_ )

    def lowercase__ ( self : Any ) -> Optional[Any]:
        """Normalize the language list (sorted, de-duplicated) and cache its length."""
        _lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None
        _lowerCAmelCase = len(self.languages ) if self.languages else None

    def __call__( self : List[str] ) -> Optional[Any]:
        """Return the Arrow struct type: parallel lists of language codes and translations."""
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )

    def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Any:
        """Encode a {lang: text-or-list} example into sorted parallel (language, translation) lists.

        Raises ValueError when the example contains a language outside the
        configured whitelist.
        """
        _lowerCAmelCase = set(self.languages )
        if self.languages and set(__snake_case ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        _lowerCAmelCase = []
        for lang, text in translation_dict.items():
            if isinstance(__snake_case , __snake_case ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        _lowerCAmelCase , _lowerCAmelCase = zip(*sorted(__snake_case ) )
        return {"language": languages, "translation": translations}

    def lowercase__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into Sequence-of-string features for both parallel lists."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [0 for i in range(r + 1 )] # nc0 = 1 _lowerCAmelCase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _lowerCAmelCase = min(lowerCAmelCase , lowerCAmelCase ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
70
1
"""Convert an original (fairseq-style) mLUKE checkpoint into the HuggingFace
``LukeForMaskedLM`` format, verifying outputs against reference values."""
import argparse
import json
import os
from collections import OrderedDict

import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


# NOTE(review): the five parameters all being named `lowerCAmelCase` (a
# SyntaxError as written), every local bound to `_lowerCAmelCase`, and the
# `__main__` block calling `convert_luke_checkpoint` / the body calling
# `load_original_entity_vocab` (neither defined under those names here) are
# automated-renaming artifacts — confirm against the upstream transformers
# conversion script before running.
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
    """Convert checkpoint weights + entity vocab into a saved HF model/tokenizer.

    Expected arguments (per the CLI below): checkpoint_path, metadata_path,
    entity_vocab_path, pytorch_dump_folder_path, model_size.
    """
    # Build the model config from the checkpoint's metadata file.
    with open(lowerCAmelCase ) as metadata_file:
        _lowerCAmelCase = json.load(lowerCAmelCase )
    _lowerCAmelCase = LukeConfig(use_entity_aware_attention=lowerCAmelCase , **metadata["""model_config"""] )

    # Load in the weights from the checkpoint_path
    _lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )["""module"""]

    # Load the entity vocab file
    _lowerCAmelCase = load_original_entity_vocab(lowerCAmelCase )
    # add an entry for [MASK2]
    _lowerCAmelCase = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1

    _lowerCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )

    # Add special tokens to the token vocabulary for downstream tasks
    _lowerCAmelCase = AddedToken("""<ent>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
    _lowerCAmelCase = AddedToken("""<ent2>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(lowerCAmelCase )
    # Patch the saved tokenizer config so it round-trips as an MLukeTokenizer.
    with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """r""" ) as f:
        _lowerCAmelCase = json.load(lowerCAmelCase )
    _lowerCAmelCase = """MLukeTokenizer"""
    with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """w""" ) as f:
        json.dump(lowerCAmelCase , lowerCAmelCase )
    with open(os.path.join(lowerCAmelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(lowerCAmelCase , lowerCAmelCase )

    _lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase )

    # Initialize the embeddings of the special tokens: new <ent>/<ent2> rows
    # are seeded from the "@" and "#" token embeddings respectively.
    _lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
    _lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""#"""] )[0]

    _lowerCAmelCase = state_dict["""embeddings.word_embeddings.weight"""]
    _lowerCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
    _lowerCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
    _lowerCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        _lowerCAmelCase = state_dict[bias_name]
        _lowerCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
        _lowerCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
        _lowerCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            _lowerCAmelCase = f"encoder.layer.{layer_index}.attention.self."
            _lowerCAmelCase = state_dict[prefix + matrix_name]
            _lowerCAmelCase = state_dict[prefix + matrix_name]
            _lowerCAmelCase = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    _lowerCAmelCase = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    _lowerCAmelCase = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    _lowerCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    _lowerCAmelCase = state_dict["""entity_predictions.bias"""]
    _lowerCAmelCase = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    _lowerCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )

    _lowerCAmelCase = LukeForMaskedLM(config=lowerCAmelCase ).eval()

    # Tied/derived weights are recomputed by tie_weights(), so drop them here.
    state_dict.pop("""entity_predictions.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.bias""" )
    _lowerCAmelCase = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
            _lowerCAmelCase = state_dict[key]
        else:
            _lowerCAmelCase = state_dict[key]

    _lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )

    # Only the (recomputable) position ids may be unexpected; only tied
    # decoder weights may be missing.
    if set(lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
    if set(lowerCAmelCase ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}" )

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    _lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase , task="""entity_classification""" )

    _lowerCAmelCase = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
    _lowerCAmelCase = (0, 9)
    _lowerCAmelCase = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )

    _lowerCAmelCase = model(**lowerCAmelCase )

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        _lowerCAmelCase = torch.Size((1, 33, 7_68) )
        _lowerCAmelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        _lowerCAmelCase = torch.Size((1, 1, 7_68) )
        _lowerCAmelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
        raise ValueError

    # Verify masked word/entity prediction
    _lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase )
    _lowerCAmelCase = """Tokyo is the capital of <mask>."""
    _lowerCAmelCase = (24, 30)
    _lowerCAmelCase = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )

    _lowerCAmelCase = model(**lowerCAmelCase )

    _lowerCAmelCase = encoding["""input_ids"""][0].tolist()
    _lowerCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
    _lowerCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(lowerCAmelCase )

    _lowerCAmelCase = outputs.entity_logits[0][0].argmax().item()
    _lowerCAmelCase = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(lowerCAmelCase ) )
    model.save_pretrained(lowerCAmelCase )


def UpperCamelCase__ ( lowerCAmelCase ):
    """Load the original JSONL entity vocab and remap it to '<lang>:<name>' keys.

    Special tokens keep a single unprefixed entry.
    """
    _lowerCAmelCase = ["""[MASK]""", """[PAD]""", """[UNK]"""]

    _lowerCAmelCase = [json.loads(lowerCAmelCase ) for line in open(lowerCAmelCase )]

    _lowerCAmelCase = {}
    for entry in data:
        _lowerCAmelCase = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                _lowerCAmelCase = entity_id
                break
            _lowerCAmelCase = f"{language}:{entity_name}"
            _lowerCAmelCase = entity_id
    return new_mapping


if __name__ == "__main__":
    A__ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    A__ : Union[str, Any] = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
70
"""Dataset builder that reads pickled pandas DataFrames into Arrow tables."""
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


# NOTE(review): both classes being named `UpperCAmelCase` (the second shadows
# the first) and the reference to an undefined `PandasConfig` are
# automated-renaming artifacts — confirm against the upstream `datasets`
# pandas packaged builder.
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
    # Optional explicit features; inferred from the data when None.
    _lowercase: Optional[datasets.Features] = None


class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    _lowercase: Tuple = PandasConfig

    def lowercase__ ( self : Optional[Any] ) -> str:
        """Expose the configured features as the dataset info."""
        return datasets.DatasetInfo(features=self.config.features )

    def lowercase__ ( self : List[str] , __snake_case : Dict ) -> int:
        """Download/extract the configured data files and emit one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        _lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        # Unnamed data files -> a single TRAIN split.
        if isinstance(__snake_case , (str, list, tuple) ):
            _lowerCAmelCase = data_files
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        _lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(__snake_case , __snake_case ):
                _lowerCAmelCase = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase = [dl_manager.iter_files(__snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
        return splits

    def lowercase__ ( self : List[Any] , __snake_case : pa.Table ) -> pa.Table:
        """Cast the raw table to the configured schema, if one was given."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase = table_cast(__snake_case , self.config.features.arrow_schema )
        return pa_table

    def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> Any:
        """Yield one Arrow table per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
            with open(__snake_case , """rb""" ) as f:
                _lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(__snake_case ) )
            yield i, self._cast_table(__snake_case )
70
1
"""Disjoint-set (union-find) forest and an undirected weighted graph with
Kruskal's minimum-spanning-tree construction."""
from __future__ import annotations

from typing import Generic, TypeVar

# NOTE(review): the TypeVar is bound to the mangled name `A__` while the
# classes below parameterize over `T` — an automated-renaming artifact;
# likewise the undefined `__snake_case` references and `_lowerCAmelCase`
# locals. Confirm against the original implementation.
A__ : List[str] = TypeVar('''T''')


class UpperCAmelCase ( Generic[T] ):
    """A union-find tree node: stores its payload, parent pointer, and rank."""

    def __init__( self : List[str] , __snake_case : T ) -> None:
        # New node is its own parent (a singleton set) with rank 0.
        _lowerCAmelCase = data
        _lowerCAmelCase = self
        _lowerCAmelCase = 0


class UpperCAmelCase ( Generic[T] ):
    """Disjoint-set forest with union-by-rank and path compression."""

    def __init__( self : List[Any] ) -> None:
        # map from node name to the node object
        _lowerCAmelCase = {}

    def lowercase__ ( self : Any , __snake_case : T ) -> None:
        # create a new set with x as its member
        _lowerCAmelCase = DisjointSetTreeNode(__snake_case )

    def lowercase__ ( self : int , __snake_case : T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        _lowerCAmelCase = self.map[data]
        if elem_ref != elem_ref.parent:
            _lowerCAmelCase = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def lowercase__ ( self : List[Any] , __snake_case : DisjointSetTreeNode[T] , __snake_case : DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation: attach lower-rank root under higher-rank root
        if nodea.rank > nodea.rank:
            _lowerCAmelCase = nodea
        else:
            _lowerCAmelCase = nodea
            if nodea.rank == nodea.rank:
                nodea.rank += 1

    def lowercase__ ( self : List[Any] , __snake_case : T , __snake_case : T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(__snake_case ) , self.find_set(__snake_case ) )


class UpperCAmelCase ( Generic[T] ):
    """Undirected weighted graph stored as an adjacency map of maps."""

    def __init__( self : Tuple ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        _lowerCAmelCase = {}

    def lowercase__ ( self : Any , __snake_case : T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            _lowerCAmelCase = {}

    def lowercase__ ( self : List[Any] , __snake_case : T , __snake_case : T , __snake_case : int ) -> None:
        # add an edge with the given weight (both directions, since undirected)
        self.add_node(__snake_case )
        self.add_node(__snake_case )
        _lowerCAmelCase = weight
        _lowerCAmelCase = weight

    def lowercase__ ( self : List[Any] ) -> GraphUndirectedWeighted[T]:
        """Build and return this graph's minimum spanning tree via Kruskal's algorithm."""
        # Collect each undirected edge exactly once.
        _lowerCAmelCase = []
        _lowerCAmelCase = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        # Sort edges by weight.
        # NOTE(review): the lambda parameter is mangled (`__snake_case`) while
        # the body reads `x` — as written this raises NameError; presumably it
        # was `lambda x: x[2]` originally.
        edges.sort(key=lambda __snake_case : x[2] )

        # creating the disjoint set
        _lowerCAmelCase = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(__snake_case )

        # MST generation: take lightest edges whose endpoints are in different sets.
        _lowerCAmelCase = 0
        _lowerCAmelCase = 0
        _lowerCAmelCase = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = edges[index]
            index += 1
            _lowerCAmelCase = disjoint_set.find_set(__snake_case )
            _lowerCAmelCase = disjoint_set.find_set(__snake_case )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(__snake_case , __snake_case , __snake_case )
                disjoint_set.union(__snake_case , __snake_case )
        return graph
70
"""Prim's minimum-spanning-tree algorithm: a plain O(V^2) variant and a
heap-based variant over an adjacency-list ``Vertex`` graph."""
import heapq as hq
import math
from collections.abc import Iterator


# NOTE(review): the repeated `UpperCamelCase__` function names below shadow
# one another, and the `_lowerCAmelCase` locals / `__snake_case` parameters
# are automated-renaming artifacts — confirm against the original file.
class UpperCAmelCase :
    """Graph vertex: id, key (current best edge weight), pi (MST predecessor),
    neighbor list, and an edges map {neighbor_id: weight}."""

    def __init__( self : str , __snake_case : Any ) -> str:
        _lowerCAmelCase = str(id_ )
        _lowerCAmelCase = None  # key: best known connection weight
        _lowerCAmelCase = None  # pi: predecessor in the MST
        _lowerCAmelCase = []
        _lowerCAmelCase = {}  # {vertex:distance}

    def __lt__( self : List[str] , __snake_case : Union[str, Any] ) -> Any:
        # Order vertices by key so they can live in a min-heap.
        return self.key < other.key

    def __repr__( self : Optional[Any] ) -> Optional[Any]:
        return self.id

    def lowercase__ ( self : Union[str, Any] , __snake_case : Tuple ) -> Optional[Any]:
        """Register an adjacent vertex."""
        self.neighbors.append(__snake_case )

    def lowercase__ ( self : Tuple , __snake_case : List[str] , __snake_case : Tuple ) -> Any:
        """Record the weight of the edge to a neighbor."""
        _lowerCAmelCase = weight


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
    """Connect vertices a and b (1-indexed) in `graph` with the given edge weight."""
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , lowerCAmelCase )
    graph[b - 1].add_edge(graph[a - 1] , lowerCAmelCase )


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
    """Prim's algorithm, list-scan variant: returns the MST as (child, parent) id pairs."""
    _lowerCAmelCase = []
    for u in graph:
        _lowerCAmelCase = math.inf  # key starts at infinity
        _lowerCAmelCase = None      # no predecessor yet
    _lowerCAmelCase = 0             # root's key
    _lowerCAmelCase = graph[:]
    while q:
        # Extract the vertex with the minimum key (linear scan).
        _lowerCAmelCase = min(lowerCAmelCase )
        q.remove(lowerCAmelCase )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                _lowerCAmelCase = u
                _lowerCAmelCase = u.edges[v.id]
    for i in range(1 , len(lowerCAmelCase ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
    """Prim's algorithm, heap variant: yields MST edges as (child, parent) id pairs."""
    for u in graph:
        _lowerCAmelCase = math.inf
        _lowerCAmelCase = None
    _lowerCAmelCase = 0
    _lowerCAmelCase = list(lowerCAmelCase )
    hq.heapify(lowerCAmelCase )
    while h:
        _lowerCAmelCase = hq.heappop(lowerCAmelCase )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                _lowerCAmelCase = u
                _lowerCAmelCase = u.edges[v.id]
                # Re-heapify after the key decrease (O(n) per update).
                hq.heapify(lowerCAmelCase )
    for i in range(1 , len(lowerCAmelCase ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def UpperCamelCase__ ( ):
    """Placeholder main; module self-tests run through doctest below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
1
"""Utilities for RAG fine-tuning: line tokenization, batch trimming, a
line-aligned seq2seq dataset, and misc JSON/git/pickle helpers."""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


# NOTE(review): repeated parameter names (`lowerCAmelCase` — a SyntaxError as
# written), locals all bound to `_lowerCAmelCase`, and undefined
# `snake_case_`/`__snake_case` references are automated-renaming artifacts;
# confirm names against the upstream transformers RAG example utils.
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True , lowerCAmelCase="pt" ):
    """Tokenize one text line to a fixed max length, padding on the given side."""
    _lowerCAmelCase = {"""add_prefix_space""": True} if isinstance(lowerCAmelCase , lowerCAmelCase ) and not line.startswith(""" """ ) else {}
    _lowerCAmelCase = padding_side
    return tokenizer(
        [line] ,
        max_length=lowerCAmelCase ,
        padding="""max_length""" if pad_to_max_length else None ,
        truncation=lowerCAmelCase ,
        return_tensors=lowerCAmelCase ,
        add_special_tokens=lowerCAmelCase ,
        **lowerCAmelCase ,
    )


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , ):
    """Drop columns that are pad tokens in every row of the batch."""
    _lowerCAmelCase = input_ids.ne(lowerCAmelCase ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class UpperCAmelCase ( snake_case_ ):
    """Dataset over parallel `<type_path>.source` / `<type_path>.target` files,
    read lazily with linecache."""

    def __init__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[int]="train" , __snake_case : Dict=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : List[str]="" , ) -> List[Any]:
        super().__init__()
        _lowerCAmelCase = Path(__snake_case ).joinpath(type_path + """.source""" )
        _lowerCAmelCase = Path(__snake_case ).joinpath(type_path + """.target""" )
        _lowerCAmelCase = self.get_char_lens(self.src_file )
        _lowerCAmelCase = max_source_length
        _lowerCAmelCase = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        _lowerCAmelCase = tokenizer
        _lowerCAmelCase = prefix
        if n_obs is not None:
            # Optionally cap the dataset to the first n_obs examples.
            _lowerCAmelCase = self.src_lens[:n_obs]
        _lowerCAmelCase = src_lang
        _lowerCAmelCase = tgt_lang

    def __len__( self : List[str] ) -> Optional[Any]:
        return len(self.src_lens )

    def __getitem__( self : Any , __snake_case : Tuple ) -> Dict[str, torch.Tensor]:
        """Read, tokenize and pad the index-th source/target line pair."""
        _lowerCAmelCase = index + 1  # linecache starts at 1
        _lowerCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , __snake_case ).rstrip("""\n""" )
        _lowerCAmelCase = linecache.getline(str(self.tgt_file ) , __snake_case ).rstrip("""\n""" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __snake_case ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG tokenizers expose separate
        # question-encoder and generator tokenizers.
        _lowerCAmelCase = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __snake_case ) else self.tokenizer
        )
        _lowerCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , __snake_case ) else self.tokenizer

        _lowerCAmelCase = encode_line(__snake_case , __snake_case , self.max_source_length , """right""" )
        _lowerCAmelCase = encode_line(__snake_case , __snake_case , self.max_target_length , """right""" )

        _lowerCAmelCase = source_inputs["""input_ids"""].squeeze()
        _lowerCAmelCase = target_inputs["""input_ids"""].squeeze()
        _lowerCAmelCase = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def lowercase__ ( __snake_case : Optional[Any] ) -> Tuple:
        """Return the character length of every line in a file."""
        return [len(__snake_case ) for x in Path(__snake_case ).open().readlines()]

    def lowercase__ ( self : Dict , __snake_case : Tuple ) -> Dict[str, torch.Tensor]:
        """Collate examples into a batch, trimming all-padding columns."""
        _lowerCAmelCase = torch.stack([x["""input_ids"""] for x in batch] )
        _lowerCAmelCase = torch.stack([x["""attention_mask"""] for x in batch] )
        _lowerCAmelCase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        _lowerCAmelCase = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __snake_case )
            else self.tokenizer.pad_token_id
        )
        _lowerCAmelCase = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __snake_case )
            else self.tokenizer.pad_token_id
        )
        _lowerCAmelCase = trim_batch(__snake_case , __snake_case )
        _lowerCAmelCase , _lowerCAmelCase = trim_batch(__snake_case , __snake_case , attention_mask=__snake_case )
        _lowerCAmelCase = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch


A__ : List[str] = getLogger(__name__)


def UpperCamelCase__ ( lowerCAmelCase ):
    """Flatten one level of nesting from a list of lists."""
    return list(itertools.chain.from_iterable(lowerCAmelCase ) )


def UpperCamelCase__ ( lowerCAmelCase ):
    """Write the current git repo state to <folder>/git_log.json."""
    _lowerCAmelCase = get_git_info()
    save_json(lowerCAmelCase , os.path.join(lowerCAmelCase , """git_log.json""" ) )


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=4 , **lowerCAmelCase ):
    """Serialize an object to a JSON file with the given indent."""
    with open(lowerCAmelCase , """w""" ) as f:
        json.dump(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase , **lowerCAmelCase )


def UpperCamelCase__ ( lowerCAmelCase ):
    """Load and return a JSON file."""
    with open(lowerCAmelCase ) as f:
        return json.load(lowerCAmelCase )


def UpperCamelCase__ ( ):
    """Return id/sha/branch/hostname of the enclosing git repo."""
    _lowerCAmelCase = git.Repo(search_parent_directories=lowerCAmelCase )
    _lowerCAmelCase = {
        """repo_id""": str(lowerCAmelCase ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
    """Eager map: like map() but returns a list."""
    return list(map(lowerCAmelCase , lowerCAmelCase ) )


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
    """Pickle an object to a file."""
    with open(lowerCAmelCase , """wb""" ) as f:
        return pickle.dump(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" def remove_articles(lowerCAmelCase ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCAmelCase ) def white_space_fix(lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase ) ) ) ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = normalize_answer(lowerCAmelCase ).split() _lowerCAmelCase = normalize_answer(lowerCAmelCase ).split() _lowerCAmelCase = Counter(lowerCAmelCase ) & Counter(lowerCAmelCase ) _lowerCAmelCase = sum(common.values() ) if num_same == 0: return 0 _lowerCAmelCase = 1.0 * num_same / len(lowerCAmelCase ) _lowerCAmelCase = 1.0 * num_same / len(lowerCAmelCase ) _lowerCAmelCase = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" return normalize_answer(lowerCAmelCase ) == normalize_answer(lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" assert len(lowerCAmelCase ) == len(lowerCAmelCase ) _lowerCAmelCase = 0 for hypo, pred in zip(lowerCAmelCase , lowerCAmelCase ): em += exact_match_score(lowerCAmelCase , lowerCAmelCase ) if len(lowerCAmelCase ) > 0: em /= len(lowerCAmelCase ) return {"em": em} def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" return model_prefix.startswith("""rag""" ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead _lowerCAmelCase = """dropout_rate""" for p in extra_params: if getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if not 
hasattr(lowerCAmelCase , lowerCAmelCase ) and not hasattr(lowerCAmelCase , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCAmelCase ) ) delattr(lowerCAmelCase , lowerCAmelCase ) continue _lowerCAmelCase = p if hasattr(lowerCAmelCase , lowerCAmelCase ) else equivalent_param[p] setattr(lowerCAmelCase , lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) ) delattr(lowerCAmelCase , lowerCAmelCase ) return hparams, config
70
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : List[Any] ) -> str: _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__snake_case ).to(__snake_case ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" ) _lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids _lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids _lowerCAmelCase = model(input_ids.to(__snake_case ) , labels=labels.to(__snake_case ) ).loss _lowerCAmelCase = -(labels.shape[-1] * loss.item()) _lowerCAmelCase = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
70
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Dict =logging.get_logger(__name__) A__ : Dict ={ '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class UpperCAmelCase ( snake_case_ ): _lowercase: List[str] = '''lilt''' def __init__( self : str , __snake_case : Any=3_05_22 , __snake_case : str=7_68 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : Union[str, Any]=30_72 , __snake_case : Tuple="gelu" , __snake_case : List[str]=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=5_12 , __snake_case : Optional[int]=2 , __snake_case : int=0.02 , __snake_case : Any=1E-1_2 , __snake_case : Optional[int]=0 , __snake_case : List[str]="absolute" , __snake_case : Dict=None , __snake_case : List[str]=4 , __snake_case : List[Any]=10_24 , **__snake_case : List[Any] , ) -> List[str]: super().__init__(pad_token_id=__snake_case , **__snake_case ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = classifier_dropout _lowerCAmelCase = channel_shrink_ratio _lowerCAmelCase = max_ad_position_embeddings
70
# NOTE(review): NLLB sentencepiece tokenizer module. This chunk carries the same
# systematic mechanical damage as the rest of the file: every assignment target
# was rewritten to the throwaway `_lowerCAmelCase` (so e.g. `self.sp_model`,
# `self.vocab_file`, `self.lang_code_to_id`, `self.fairseq_offset` are read but
# never bound), method names were collapsed to `lowercase__` (later defs shadow
# earlier ones), and the collapsed lines put `# fmt: off` mid-line, which
# comments out the remainder of that physical line (including the opening of the
# language-code list). Code is kept byte-identical below; a faithful restoration
# requires the upstream `tokenization_nllb.py` as reference — do not guess.
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A__ : Any =logging.get_logger(__name__) A__ : List[Any] ='''▁''' A__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model'''} A__ : Union[str, Any] ={ '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } A__ : Dict ={ '''facebook/nllb-200-distilled-600M''': 10_24, } # fmt: off A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', 
'''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCAmelCase ( snake_case_ ): _lowercase: int = VOCAB_FILES_NAMES 
_lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase: str = ['''input_ids''', '''attention_mask'''] _lowercase: List[int] = [] _lowercase: List[int] = [] def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase = legacy_behaviour super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , ) _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__snake_case ) ) _lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # 
Mimic fairseq token-to-id alignment for the first 4 token _lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowerCAmelCase = 1 _lowerCAmelCase = len(self.sp_model ) _lowerCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case ) } _lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} _lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _lowerCAmelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn""" _lowerCAmelCase = self.lang_code_to_id[self._src_lang] _lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ) -> List[str]: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None _lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase__ ( self : List[Any] ) -> Any: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase__ ( self : int ) -> str: return self._src_lang @src_lang.setter def lowercase__ ( self : Dict , __snake_case : str ) -> 
None: _lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) _lowerCAmelCase = [1] * len(self.prefix_tokens ) _lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__snake_case )) + suffix_ones return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _lowerCAmelCase = src_lang _lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case ) _lowerCAmelCase = self.convert_tokens_to_ids(__snake_case ) _lowerCAmelCase = tgt_lang_id return inputs def lowercase__ ( self : List[Any] ) 
-> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]: return self.sp_model.encode(__snake_case , out_type=__snake_case ) def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCAmelCase = self.sp_model.PieceToId(__snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str: _lowerCAmelCase = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip() return out_string def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__snake_case ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _lowerCAmelCase = os.path.join( __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __snake_case ) elif not os.path.isfile(self.vocab_file ): with open(__snake_case , """wb""" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__snake_case ) return (out_vocab_file,) def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : 
Optional[int] , ) -> BatchEncoding: _lowerCAmelCase = src_lang _lowerCAmelCase = tgt_lang return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case ) def lowercase__ ( self : str ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def lowercase__ ( self : Dict ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase__ ( self : str , __snake_case : int ) -> None: _lowerCAmelCase = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id] def lowercase__ ( self : Any , __snake_case : str ) -> None: _lowerCAmelCase = self.lang_code_to_id[lang] if self.legacy_behaviour: _lowerCAmelCase = [] _lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase = [self.cur_lang_code] _lowerCAmelCase = [self.eos_token_id]
70
1
# NOTE(review): Swin + UperNet checkpoint-conversion script. Same systematic
# damage as the rest of the file: every assignment target was rewritten to the
# throwaway `_lowerCAmelCase` (so e.g. `model_name`, `embed_dim`, `depths`,
# `num_heads`, `rename_keys`, `in_proj_weight`, `state_dict`, `model`, `logits`
# are read but never bound), and single-parameter helpers pass `lowerCAmelCase`
# where distinct arguments belong (e.g. every keyword of SwinConfig receives the
# same value). Collapsed lines also place `# fmt: off` / inline comments
# mid-line, which comments out the remainder of those physical lines. Code is
# kept byte-identical below; restoring it needs the upstream conversion script
# as reference — do not guess attribute names.
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = 3_84 _lowerCAmelCase = 7 if "tiny" in model_name: _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 6, 2) _lowerCAmelCase = (3, 6, 12, 24) elif "small" in model_name: _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (3, 6, 12, 24) elif "base" in model_name: _lowerCAmelCase = 1_28 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (4, 8, 16, 32) _lowerCAmelCase = 12 _lowerCAmelCase = 5_12 elif "large" in model_name: _lowerCAmelCase = 1_92 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (6, 12, 24, 48) _lowerCAmelCase = 12 _lowerCAmelCase = 7_68 # set label information _lowerCAmelCase = 1_50 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """ade20k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = SwinConfig( embed_dim=lowerCAmelCase , depths=lowerCAmelCase , num_heads=lowerCAmelCase , window_size=lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) _lowerCAmelCase = UperNetConfig( backbone_config=lowerCAmelCase , auxiliary_in_channels=lowerCAmelCase , num_labels=lowerCAmelCase , idalabel=lowerCAmelCase , labelaid=lowerCAmelCase , ) return config def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) 
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) _lowerCAmelCase = 
state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[:dim, :] _lowerCAmelCase = in_proj_bias[: dim] _lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] _lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] _lowerCAmelCase = in_proj_weight[ -dim :, : ] _lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = x.shape _lowerCAmelCase = x.reshape(lowerCAmelCase , 4 , in_channel // 4 ) _lowerCAmelCase = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCAmelCase , lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = x.shape _lowerCAmelCase = x.reshape(lowerCAmelCase , in_channel // 4 , 4 ) _lowerCAmelCase = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCAmelCase , lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = x.shape[0] _lowerCAmelCase = x.reshape(4 , in_channel // 4 ) _lowerCAmelCase = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = x.shape[0] _lowerCAmelCase = x.reshape(in_channel // 4 , 4 ) _lowerCAmelCase = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": 
"""https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } _lowerCAmelCase = model_name_to_url[model_name] _lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , file_name=lowerCAmelCase )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCAmelCase , param.shape ) _lowerCAmelCase = get_upernet_config(lowerCAmelCase ) _lowerCAmelCase = UperNetForSemanticSegmentation(lowerCAmelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCAmelCase = state_dict.pop(lowerCAmelCase ) if "bn" in key: _lowerCAmelCase = key.replace("""bn""" , """batch_norm""" ) _lowerCAmelCase = val # rename keys _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for src, dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) read_in_q_k_v(lowerCAmelCase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCAmelCase = reverse_correct_unfold_reduction_order(lowerCAmelCase ) if "norm" in key: _lowerCAmelCase = reverse_correct_unfold_norm_order(lowerCAmelCase ) model.load_state_dict(lowerCAmelCase ) # verify on image _lowerCAmelCase = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert("""RGB""" ) _lowerCAmelCase = SegformerImageProcessor() _lowerCAmelCase = processor(lowerCAmelCase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCAmelCase = model(lowerCAmelCase ) _lowerCAmelCase = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCAmelCase = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": _lowerCAmelCase = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": _lowerCAmelCase = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": _lowerCAmelCase = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(lowerCAmelCase ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": A__ : Tuple =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, 
choices=[F"""upernet-swin-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A__ : Tuple =parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = len(lowerCAmelCase ) for i in range(length - 1 ): _lowerCAmelCase = i for k in range(i + 1 , lowerCAmelCase ): if collection[k] < collection[least]: _lowerCAmelCase = k if least != i: _lowerCAmelCase , _lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": A__ : str =input('''Enter numbers separated by a comma:\n''').strip() A__ : Optional[int] =[int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
70
1
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class UpperCAmelCase(nn.Module):
    """Two parallel 2D transformers whose (residual) outputs are mixed.

    Fixes from review: the original `__init__`/forward repeated the parameter
    name `__snake_case` (a SyntaxError) and bound every attribute/local to the
    throwaway `_lowerCAmelCase`, so `self.transformers`, `self.mix_ratio`,
    `self.condition_lengths`, `self.transformer_index_for_condition` and the
    forward locals were never defined. Names are restored from the keyword
    reads in the original body; default values are unchanged.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def lowercase__(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both sub-transformers on their condition slice and mix the residuals.

        Returns a ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
70
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce *videos* into batch form: a list of videos, each a list of frames.

    NOTE(review): the original bound the argument to an obfuscated name and then
    read the undefined name ``videos`` — restored so the parameter is actually used.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video (list of frames)
    elif is_valid_image(videos):
        return [[videos]]  # a single frame
    raise ValueError(f"Could not make batched video from {videos}")


class UpperCAmelCase(snake_case_):
    """Video image processor: resize -> center-crop -> rescale (with optional
    offset) -> normalize, applied frame-by-frame to batched videos.

    NOTE(review): the original declared every method parameter as ``__snake_case``
    (duplicate names — a SyntaxError) and every method as ``lowercase__``;
    conventional names are restored from the keyword arguments visible in the body.
    """

    _lowercase = ["pixel_values"]  # model input names

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to ``size``: either shortest-edge preserving aspect ratio,
        or an exact (height, width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to exactly (size['height'], size['width'])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``, optionally subtracting ``scale / 2``
        first (mirrors the original's visible operations — offset centering)."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the full transform pipeline to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into a `BatchFeature` of pixel values."""
        # Resolve every option against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
70
1
"""Minimum cut of a flow network via Ford-Fulkerson with BFS (Edmonds-Karp)."""

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search over residual capacities.

    Fills ``parent`` with the BFS tree and returns True iff *t* is reachable
    from *s* through edges with remaining capacity.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated edges of a minimum s-t cut as (u, v) pairs.

    Mutates ``graph`` into its residual form while computing max flow, then
    reports edges whose residual capacity dropped to 0 from a positive
    original capacity.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        # Find the bottleneck capacity along the augmenting path.
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
70
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase(snake_case_):
    """Processor pairing an image processor with a tokenizer (Donut-style),
    including `tokenajson` to turn generated token sequences into JSON.

    NOTE(review): the original declared all three class attributes as
    ``_lowercase`` (colliding), annotated them with un-imported ``typing``
    names, and named every method ``lowercase__`` while the body recursively
    calls ``self.tokenajson`` — conventional names restored below.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: accept the deprecated kwarg name.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Route images to the image processor and text to the tokenizer;
        when both are given, attach the token ids as ``labels``."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Original dropped this assignment into a throwaway local;
            # the encodings are meant to become the labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make the tokenizer the active processor."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated ``<s_key>...</s_key>`` token sequence to a JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unbalanced opening tag: drop it and continue.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
70
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public names it provides, consumed by `_LazyModule`.
# NOTE(review): the original bound this dict to `A__` but passed the undefined
# name `_import_structure` to `_LazyModule`, and later *replaced* the dict with
# the torch list instead of keying it under "modeling_nezha" — both fixed.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch model classes are only importable when torch is installed.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first
    # attribute access (standard transformers lazy-init pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _lowerCAmelCase = [] for num in range(len(lowerCAmelCase ) ): _lowerCAmelCase = 0 while 2 * i * i <= odd_composites[num]: _lowerCAmelCase = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase ) == n: return list_nums return [] def UpperCamelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
70
1
'''simple docstring'''
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectrona,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        """Minimal stand-in for PIL.Image when vision extras are missing.

        NOTE(review): the original named this stub the same as the test class
        below (name collision) — restored to the name the tests reference.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        """Fallback loader used when vision extras are missing."""
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase(unittest.TestCase):
    """Pipeline tests for document question answering (layoutlm/donut backends)."""

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        # Build a pipeline plus examples covering the three input shapes:
        # loaded image, image URL, and URL with precomputed word boxes.
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_large_model_tf(self):
        pass
70
"""Split raw DPR biencoder data into an evaluation-set file (questions) and a
gold-data file (tab-separated positive-context titles)."""
import argparse
import json

from tqdm import tqdm


def main():
    """Parse CLI args and write the evaluation and gold files line-by-line.

    NOTE(review): the original passed an undefined name as ``type=`` for every
    argument and called ``main()`` without defining it — both restored.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # One line per record: question in the eval set, titles in the gold file.
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
70
1
"""Tests for the PyTorch DistilBERT model family."""
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    """Builds tiny DistilBERT configs/inputs and checks each head's output shapes.

    The name ``DistilBertModelTester`` is what the test class below instantiates;
    the anonymized source had all three classes named ``UpperCAmelCase``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # ``parent`` is the unittest.TestCase, used for its assert helpers.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # Note DistilBERT's config uses dim/n_layers/n_heads/hidden_dim naming.
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run both with and without the attention mask.
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example num_choices times: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): these four flags were anonymized in the source
    # ("_lowercase: ... = True" x4); restored to the conventional
    # ModelTesterMixin switches — confirm against upstream transformers.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # Trace on CPU, then reload on the GPU device to check portability.
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of hidden states from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
70
"""Lazy-import entry point for the BigBird-Pegasus model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> exported names; consumed by _LazyModule below so the heavy
# modeling code is only imported on first attribute access.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch the modeling submodule is simply not registered.
    pass
else:
    # The original anonymized code reassigned the whole structure to this list,
    # clobbering the configuration entry; it must be ADDED under its own key.
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with the lazy proxy; binding the
    # proxy to a plain variable (as the anonymized source did) never installs it.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
1